source
stringlengths
3
92
c
stringlengths
26
2.25M
lin_algebra_moist.c
/* This source file is part of GAME-DA, which is released under the MIT license. Github repository: https://github.com/OpenNWP/GAME-DA */ /* linear algebra functions for the moist assimilation process */ #include <stdlib.h> #include <stdio.h> #include "game-da.h" int permute_lines_moist(double [][NO_OF_CHOSEN_OBSERVATIONS_MOIST], int, int); int inv_gauss_moist(double to_be_inverted[][NO_OF_CHOSEN_OBSERVATIONS_MOIST], double inv[][NO_OF_CHOSEN_OBSERVATIONS_MOIST]) { /* This function computes the inverse inv of the matrix to_be_inverted, using the Gauss scheme. CAUTION: in the process, to_be_inverted will be modified. */ // firstly, the inverse is initialized with the unity matrix #pragma omp parallel for for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++i) { inv[i][i] = 1; } /* Gaussian downwards ------------------ we will start to modify to_be_inverted now (misuse of name) */ int permute_index_found, permute_index_counter; double factor; for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_MOIST - 1; ++i) { /* checking if a permutation is necessary */ // Firstly, the permutation index has to be found. 
permute_index_found = 0; permute_index_counter = i; while (permute_index_found == 0) { if (to_be_inverted[permute_index_counter][i] != 0) { permute_index_found = 1; } else { permute_index_counter += 1; } } // actually performing the permutation if (permute_index_counter > i) { permute_lines_moist(to_be_inverted, i, permute_index_counter); permute_lines_moist(inv, i, permute_index_counter); } // permutation is done, now comes the actual calculation // dividing the line by to_be_inverted[i][i] factor = 1/to_be_inverted[i][i]; #pragma omp parallel for for (int j = i; j < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++j) { to_be_inverted[i][j] = factor*to_be_inverted[i][j]; } #pragma omp parallel for for (int j = 0; j < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++j) { inv[i][j] = factor*inv[i][j]; } // loop over all the lines that are below the current line #pragma omp parallel for private(factor) for (int j = i + 1; j < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++j) { factor = -to_be_inverted[j][i]; for (int k = i; k < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++k) { to_be_inverted[j][k] = to_be_inverted[j][k] + factor*to_be_inverted[i][k]; } for (int k = 0; k < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++k) { inv[j][k] = inv[j][k] + factor*inv[i][k]; } } } #pragma omp parallel for for (int j = 0; j < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++j) { inv[NO_OF_CHOSEN_OBSERVATIONS_MOIST - 1][j] = inv[NO_OF_CHOSEN_OBSERVATIONS_MOIST - 1][j]/to_be_inverted[NO_OF_CHOSEN_OBSERVATIONS_MOIST - 1][NO_OF_CHOSEN_OBSERVATIONS_MOIST - 1]; } to_be_inverted[NO_OF_CHOSEN_OBSERVATIONS_MOIST - 1][NO_OF_CHOSEN_OBSERVATIONS_MOIST - 1] = 1; /* Gaussian upwards ---------------- */ for (int i = NO_OF_CHOSEN_OBSERVATIONS_MOIST - 1; i >= 1; --i) { #pragma omp parallel for for (int j = i - 1; j >= 0; --j) { for (int k = 0; k < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++k) { inv[j][k] = inv[j][k] - to_be_inverted[j][i]*inv[i][k]; } } } return 0; } int permute_lines_moist(double matrix[][NO_OF_CHOSEN_OBSERVATIONS_MOIST], int line_a, int line_b) { /* Permutes 
line_a with line_b of matrix. */ double line_a_pre[NO_OF_CHOSEN_OBSERVATIONS_MOIST]; #pragma omp parallel for for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++i) { line_a_pre[i] = matrix[line_a][i]; } #pragma omp parallel for for (int i = 0; i < NO_OF_CHOSEN_OBSERVATIONS_MOIST; ++i) { matrix[line_a][i] = matrix[line_b][i]; matrix[line_b][i] = line_a_pre[i]; } return 0; }
ds800.c
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <string.h> #include <omp.h> #include "common.h" /** pi / 2 */ #define PI_2 (3.14159 / 2.0) /** distribution function used to smudge pixels together */ #define SKEW(X) ((PI_2 - atan(X)) / PI_2) /** smudge pixels in a target pixel's vecinity together */ static inline void _modif(pixel_t* p, img_t const img, float vh, float vw, size_t i, size_t j) { float r = .0f, g = .0f, b = .0f; float s = .0f; long ii, jj; // define the vecinity long minh = (long)(((long)i - 1) * (vh)); long minw = (long)(((long)j - 1) * (vw)); long maxh = (long)(((long)i + 1) * (vh)); long maxw = (long)(((long)j + 1) * (vw)); // transform for(ii = minh; ii <= maxh; ++ii) { if(ii < 0 || ii >= img.h) continue; for(jj = minw; jj <= maxw; ++jj) { if(jj < 0 || jj >= img.w) continue; float y = (vh * i) - (ii); float x = (vw * j) - (jj); float vv = (vh + vw) / 2.0; float dist = abs( y * y + x * x ) / (vv * vv); float skew = 0.0; dist = dist * dist; skew = SKEW(dist); r += skew * A(img, ii, jj).r; g += skew * A(img, ii, jj).g; b += skew * A(img, ii, jj).b; s += skew; } } // save pixel (*p).r = (int)(r / s); (*p).g = (int)(g / s); (*p).b = (int)(b / s); } typedef struct { img_t* ret; img_t img; float vh, vw; size_t i, retw; } tdata_t; static void tDS(void* data) { tdata_t* mydata = (tdata_t*)data; size_t j; for(j = 0; j < mydata->retw; ++j) { _modif(&A((*(mydata->ret)), mydata->i, j), mydata->img, mydata->vh, mydata->vw, mydata->i, j); } } /** downsample a picture to 800x800 */ img_t downSample800(img_t const img) { img_t ret = { 800, 800, (pixel_t*)malloc(800 * 800 * sizeof(pixel_t)) }; int i, j; float vw = (float)img.w / ret.w; float vh = (float)img.h / ret.h; if(img.w < 800 || img.h < 800) { memcpy(ret.pixels, img.pixels, sizeof(pixel_t) * img.w * img.h); ret.w = img.w; ret.h = img.h; return ret; } assert(img.w >= 800 && img.h >= 800); tdata_t* datas = (tdata_t*)malloc(sizeof(tdata_t) * ret.h); #pragma omp parallel for 
for(i = 0; i < ret.h; ++i) { tdata_t* data = &datas[i]; data->ret = &ret; data->img = img; data->vh = vh; data->vw = vw; data->i = i; data->retw = ret.w; tDS(data); } free(datas); return ret; }
ch_ompss.c
#include "ch_common.h"
#include "../timing.h"
// #include "../timing_override.h"

/*
 * Task-based, MPI-distributed blocked Cholesky factorization (right-looking).
 *
 * ts         - tile size (each tile is ts*ts doubles)
 * nt         - number of tiles per matrix dimension
 * A[i][j]    - pointers to the locally stored tiles
 * B          - receive buffer for a remotely-owned diagonal (potrf) tile
 *              (MPI_Irecv'd into below)
 * C[i]       - per-column receive buffers for remotely-owned panel (trsm) tiles
 * block_rank - block_rank[i*nt+j] is the MPI rank that owns tile (i,j)
 *
 * NOTE(review): mype and np appear to be this process' MPI rank and the number
 * of ranks, and omp_potrf/omp_trsm/omp_gemm/omp_syrk, waitall and
 * reset_send_flags appear to come from ch_common.h - confirm there.
 *
 * All work is expressed as OpenMP tasks created by a single thread; ordering
 * between compute and communication tasks is enforced through depend clauses,
 * partly via the comm_sentinel dummy variable.
 */
void cholesky_mpi(const int ts, const int nt, double *A[nt][nt], double *B, double *C[nt], int *block_rank)
{
#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
    #pragma omp parallel
    {
        chameleon_thread_init();
    }
    // necessary to be aware of binary base addresses to calculate offset for target entry functions
    chameleon_determine_base_addresses((void *)&cholesky_mpi);
#endif
    #pragma omp parallel
    {
        #pragma omp single
        {
            INIT_TIMING(omp_get_num_threads());
            START_TIMING(TIME_TOTAL);
            {
                START_TIMING(TIME_CREATE);
                // right-looking factorization: one outer iteration per panel k
                for (int k = 0; k < nt; k++) {
                    // 1) factorize the diagonal tile (only on its owner rank)
                    if (block_rank[k*nt+k] == mype) {
                        #pragma omp task depend(out: A[k][k]) firstprivate(k)
                        {
                            //printf("Computing potrf in k=%d\n", k);
                            omp_potrf(A[k][k], ts, ts);
                        }
                    }

                    // dummy variable used only in depend clauses to serialize the
                    // communication tasks against the compute tasks; its value is
                    // never actually referenced
                    int comm_sentinel;

                    // 2) distribute the factorized diagonal tile to every rank
                    //    that owns a tile in panel row k
                    if (block_rank[k*nt+k] == mype && np != 1) {
                        // use comm_sentinel to make sure this task runs before the communication tasks below
                        #pragma omp task depend(in: A[k][k], comm_sentinel) firstprivate(k) untied
                        {
                            //printf("Communicating potrf in k=%d\n", k);
                            START_TIMING(TIME_COMM);
                            MPI_Request *reqs = NULL;
                            int nreqs = 0;
                            char send_flags[np];
                            reset_send_flags(send_flags);
                            // count the distinct destination ranks first...
                            for (int kk = k+1; kk < nt; kk++) {
                                if (!send_flags[block_rank[k*nt+kk]]) {
                                    ++nreqs;
                                    send_flags[block_rank[k*nt+kk]] = 1;
                                }
                            }
                            reqs = malloc(sizeof(MPI_Request)*nreqs);
                            nreqs = 0;
                            // ...then post one Isend per remote destination
                            for (int dst = 0; dst < np; dst++) {
                                if (send_flags[dst] && dst != mype) {
                                    MPI_Request send_req;
                                    //printf("Sending potrf block to %d in k=%d\n", dst, k);
                                    MPI_Isend(A[k][k], ts*ts, MPI_DOUBLE, dst, k*nt+k, MPI_COMM_WORLD, &send_req);
                                    reqs[nreqs++] = send_req;
                                }
                            }
                            //printf("Waiting for potrf block in k=%d\n", k);
                            waitall(reqs, nreqs);
                            free(reqs);
                            END_TIMING(TIME_COMM);
                        }
                    } else if (block_rank[k*nt+k] != mype) {
                        // non-owner side: receive the diagonal tile into B if this
                        // rank owns any tile in panel row k
                        // use comm_sentinel to make sure this task runs before the communication tasks below
                        #pragma omp task depend(out: B) depend(in:comm_sentinel) firstprivate(k) untied
                        {
                            START_TIMING(TIME_COMM);
                            int recv_flag = 0;
                            for (int i = k + 1; i < nt; i++) {
                                if (block_rank[k*nt+i] == mype) {
                                    recv_flag = 1;
                                    break;
                                }
                            }
                            if (recv_flag) {
                                MPI_Request recv_req;
                                MPI_Irecv(B, ts*ts, MPI_DOUBLE, block_rank[k*nt+k], k*nt+k, MPI_COMM_WORLD, &recv_req);
                                //printf("Receiving potrf block from %d in k=%d\n", block_rank[k*nt+k], k);
                                waitall(&recv_req, 1);
                            }
                            END_TIMING(TIME_COMM);
                        }
                    }

                    // 3) triangular solves for the locally-owned tiles of panel row k;
                    //    the diagonal tile is read from A[k][k] (owner) or B (received)
                    for (int i = k + 1; i < nt; i++) {
                        if (block_rank[k*nt+i] == mype) {
                            if (block_rank[k*nt+k] == mype) {
                                #pragma omp task depend(in: A[k][k], comm_sentinel) depend(out: A[k][i]) firstprivate(k, i)
                                {
                                    omp_trsm(A[k][k], A[k][i], ts, ts);
                                }
                            } else {
                                #pragma omp task depend(in: B, comm_sentinel) depend(out: A[k][i]) firstprivate(k, i)
                                {
                                    omp_trsm(B, A[k][i], ts, ts);
                                }
                            }
                        }
                    }

                    // 4) exchange the trsm results: send locally-owned panel tiles to
                    //    every rank that needs them for its gemm/syrk updates, and
                    //    receive remotely-owned panel tiles into C[i]
                    #pragma omp task depend(inout: comm_sentinel) firstprivate(k) shared(A) untied
                    {
                        START_TIMING(TIME_COMM);
                        char send_flags[np];
                        reset_send_flags(send_flags);
                        int nreqs = 0;
                        // upper bound in case all our blocks have to be sent
                        int max_req = (nt-k)*(np-1);
                        MPI_Request *reqs = malloc(sizeof(*reqs)*max_req);
                        for (int i = k + 1; i < nt; i++) {
                            if (block_rank[k*nt+i] == mype && np != 1) {
                                // owners of the trailing tiles in row/column i consume A[k][i]
                                for (int ii = k + 1; ii < i; ii++) {
                                    if (!send_flags[block_rank[ii*nt+i]]) {
                                        send_flags[block_rank[ii*nt+i]] = 1;
                                    }
                                }
                                for (int ii = i + 1; ii < nt; ii++) {
                                    if (!send_flags[block_rank[i*nt+ii]]) {
                                        send_flags[block_rank[i*nt+ii]] = 1;
                                    }
                                }
                                // the owner of the diagonal tile needs it for syrk
                                if (!send_flags[block_rank[i*nt+i]]) send_flags[block_rank[i*nt+i]] = 1;
                                for (int dst = 0; dst < np; dst++) {
                                    if (send_flags[dst] && dst != mype) {
                                        MPI_Request send_req;
                                        MPI_Isend(A[k][i], ts*ts, MPI_DOUBLE, dst, k*nt+i, MPI_COMM_WORLD, &send_req);
                                        reqs[nreqs++] = send_req;
                                    }
                                }
                                reset_send_flags(send_flags);
                            }
                            if (block_rank[k*nt+i] != mype) {
                                // receive A[k][i] into C[i] if any local update reads it
                                int recv_flag = 0;
                                for (int ii = k + 1; ii < i; ii++) {
                                    if (block_rank[ii*nt+i] == mype) recv_flag = 1;
                                }
                                for (int ii = i + 1; ii < nt; ii++) {
                                    if (block_rank[i*nt+ii] == mype) recv_flag = 1;
                                }
                                if (block_rank[i*nt+i] == mype) recv_flag = 1;
                                if (recv_flag) {
                                    MPI_Request recv_req;
                                    MPI_Irecv(C[i], ts*ts, MPI_DOUBLE, block_rank[k*nt+i], k*nt+i, MPI_COMM_WORLD, &recv_req);
                                    reqs[nreqs++] = recv_req;
                                }
                            }
                        }
                        //printf("Waiting for trsm blocks in k=%d\n", k);
                        waitall(reqs, nreqs);
                        free(reqs);
                        END_TIMING(TIME_COMM);
                    }

                    // 5) trailing-matrix update: gemm on off-diagonal tiles, syrk on
                    //    diagonal tiles; inputs come from A (local) or C (received)
                    for (int i = k + 1; i < nt; i++) {
                        for (int j = k + 1; j < i; j++) {
                            if (block_rank[j*nt+i] == mype) {
                                if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] == mype) {
                                    #pragma omp task depend(in: A[k][i], A[k][j]) depend(out: A[j][i]) firstprivate(k, j, i)
                                    {
                                        omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts);
                                    }
                                } else if (block_rank[k*nt+i] != mype && block_rank[k*nt+j] == mype) {
                                    #pragma omp task depend(in: A[k][j], comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
                                    {
                                        omp_gemm(C[i], A[k][j], A[j][i], ts, ts);
                                    }
                                } else if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] != mype) {
                                    #pragma omp task depend(in: A[k][i], comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
                                    {
                                        omp_gemm(A[k][i], C[j], A[j][i], ts, ts);
                                    }
                                } else {
                                    #pragma omp task depend(in: comm_sentinel) depend(out: A[j][i]) firstprivate(k, j, i)
                                    {
                                        omp_gemm(C[i], C[j], A[j][i], ts, ts);
                                    }
                                }
                            }
                        }
                        if (block_rank[i*nt+i] == mype) {
                            if (block_rank[k*nt+i] == mype) {
                                #pragma omp task depend(in: A[k][i]) depend(out: A[i][i]) firstprivate(k, i)
                                {
                                    omp_syrk(A[k][i], A[i][i], ts, ts);
                                }
                            } else {
                                #pragma omp task depend(in: comm_sentinel) depend(out: A[i][i]) firstprivate(k, i)
                                {
                                    omp_syrk(C[i], A[i][i], ts, ts);
                                }
                            }
                        }
                    }
                }
                END_TIMING(TIME_CREATE);
            }
            // wait for the whole task graph before timing/cleanup
            #pragma omp taskwait
            END_TIMING(TIME_TOTAL);
            MPI_Barrier(MPI_COMM_WORLD);
            PRINT_TIMINGS();
            FREE_TIMING();
        }// pragma omp single
    }// pragma omp parallel
#if defined(CHAMELEON) || defined(CHAMELEON_TARGET)
    chameleon_finalize();
#endif
}
affinity.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "resources.h"
#include "affinity.h"
#include "workload.h"
#include "omplib.h"
#include "mem.h"

/* forward declarations of the helpers implemented below */
void init_affinity(thread_str *t, local_str *local);
void end_affinity(thread_str *t, local_str *local);
void initialise_thread(local_str *local);
void initialise_global_queues(thread_str *t, local_str local);
bound_str get_bounds(bound_str space, int current_block, int total_blocks, int offset);
int get_stepsize(int low, int high, int total_blocks);
int get_most_loaded_thread(int *next_lo, bound_str *local, int nthreads);
bound_str get_work(thread_str *t, int thread_id, int nthreads);

/*
 * Affinity-scheduled loop driver: the iteration space [0, N) is split into
 * one queue per thread; each thread first consumes chunks from its own queue
 * and, once a chunk reaches the end of that queue, switches to stealing
 * chunks from whichever thread currently has the most remaining work,
 * until get_most_loaded_thread() reports DONE.
 */
void runloop_affinity(int loopid)
{
  thread_str t;          /* shared scheduling state (queues, next_lo, locks) */
  t.global.lo = 0;
  t.global.hi = N;       /* global iteration space for this loop */
  #pragma omp parallel default(none) shared(loopid, t)
  {
    local_str local;     /* per-thread bookkeeping: tid, nthreads, mode, current chunk */
    init_affinity(&t, &local);
    while(1) {
      if(local.affinity) {
        /* stealing mode: pick the thread with the most remaining iterations */
        local.most_loaded = get_most_loaded_thread(t.next_lo, t.local, local.nthreads);
        if(local.most_loaded == DONE) break;  /* no work left anywhere */
      }
      /* take the next chunk from the selected thread's queue
         (initially local.most_loaded is this thread's own id) */
      local.current = get_work(&t, local.most_loaded, local.nthreads);
      if(local.current.hi>= t.local[local.most_loaded].hi) {
        /* chunk reaches the end of that queue: clamp it and switch to
           stealing mode for subsequent iterations of the while loop */
        local.current.hi = t.local[local.most_loaded].hi;
        local.affinity = TRUE;
      }
      execute_work(loopid, local.current.lo, local.current.hi);
    }
    end_affinity(&t, &local);
  } // end pragma
}

/* Per-thread setup: initialise local bookkeeping, allocate the shared
 * structures once, fill this thread's queue, and synchronise so every
 * thread sees fully-initialised shared state before work starts. */
void init_affinity(thread_str *t, local_str *local)
{
  initialise_thread(local);
  #pragma omp single
  {
    // allocate required memory for all threads
    malloc_structure(t, *local);
  }
  initialise_global_queues(t, *local);
  /* make sure all the threads have initialised their shared data */
  #pragma omp barrier
}

/* Per-thread teardown: wait for all threads to finish, release any
 * per-thread locks, then free the shared structures exactly once. */
void end_affinity(thread_str *t, local_str *local)
{
  #pragma omp barrier
#ifdef LOCK
  deallocate_lock(&t->lock[local->tid]);
  #pragma omp barrier // ensure all locks are deallocated before freeing memory
#endif
  #pragma omp single
  {
    // deallocates required memory for all threads
    free_structure(t);
  }
}

/* Fill in the calling thread's local_str; each thread starts in
 * own-queue mode (affinity == FALSE) targeting its own queue. */
void initialise_thread(local_str *local)
{
  local->tid = get_tid();
  local->nthreads = get_total_threads();
  local->most_loaded = local->tid;
  local->affinity = FALSE;
}

/* Set up this thread's slice of the global iteration space:
 * its queue bounds and the next_lo cursor that get_work() advances. */
void initialise_global_queues(thread_str *t, local_str local)
{
#ifdef LOCK
  allocate_lock(&t->lock[local.tid]);
#endif
  t->local[local.most_loaded] = get_bounds(t->global, local.most_loaded, local.nthreads, 0);
  t->next_lo[local.most_loaded] = t->local[local.most_loaded].lo;
}

/* Return the [lo, hi) bounds of block current_block when space is divided
 * into total_blocks equal (ceil-sized) blocks; hi is clamped to space.hi. */
bound_str get_bounds(bound_str space, int current_block, int total_blocks, int offset)
{
  bound_str boundaries;
  int ipt = get_stepsize(space.lo, space.hi, total_blocks);
  boundaries.lo = current_block * ipt + offset;
  boundaries.hi = (current_block+1) * ipt + offset;
  if (boundaries.hi > space.hi) boundaries.hi = space.hi;
  return boundaries;
}

/* Ceiling division of the range size by total_blocks. */
int get_stepsize(int low, int high, int total_blocks)
{
  return (int) ceil((double)(high - low)/(double)total_blocks);
}

/* Atomically take the next chunk (1/nthreads of the remaining work,
 * rounded up) from thread_id's queue by advancing its next_lo cursor.
 * Protected either by a per-thread lock (LOCK) or an omp critical section.
 * Note: the returned chunk's hi may exceed the queue end; the caller
 * (runloop_affinity) clamps it. */
bound_str get_work(thread_str *t, int thread_id, int nthreads)
{
  bound_str c;
  int stepsize;
#ifdef LOCK
  set_lock(&t->lock[thread_id]);
#else
  #pragma omp critical
  {
#endif
    c.lo = t->next_lo[thread_id];
    stepsize = get_stepsize(c.lo, t->local[thread_id].hi, nthreads);
    t->next_lo[thread_id] += stepsize;
#ifdef LOCK
  unset_lock(&t->lock[thread_id]);
#else
  } // end of critical
#endif
  c.hi = c.lo + stepsize;
  return c;
}

/* Return the id of the thread with the most remaining iterations,
 * or DONE if every queue is exhausted.
 * NOTE(review): next_lo is read here without the lock/critical section
 * used in get_work(), so the result is only a heuristic snapshot. */
int get_most_loaded_thread(int *next_lo, bound_str *local, int nthreads)
{
  int max_rem = 0, rem;
  int i, most_loaded=DONE;
  for(i=0; i<nthreads; i++) {
    rem = local[i].hi - next_lo[i];
    if(rem > max_rem) {
      max_rem = rem;
      most_loaded = i;
    }
  }
  return most_loaded;
}
ellipticBuildJacobi.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/

#include "elliptic.h"

// per-element builders of the operator diagonal for the IPDG discretization
void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A);
void BuildLocalIpdgDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A);
void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A);
void BuildLocalIpdgDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A);
void BuildLocalIpdgDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A);

// per-element builders of the operator diagonal for the continuous (C0) discretization
void BuildLocalContinuousDiagTri2D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A);
void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *A);
void BuildLocalContinuousDiagTet3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A);
void BuildLocalContinuousDiagHex3D (elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A);

/*
 * Builds the inverse diagonal of the elliptic operator (Jacobi preconditioner).
 * On return, *invDiagA points to a freshly calloc'd array of
 * mesh->Np*mesh->Nelements entries holding 1/diag(A); ownership passes to
 * the caller. Dispatches to the element-type- and discretization-specific
 * builders declared above.
 */
void ellipticBuildJacobi(elliptic_t* elliptic, dfloat lambda, dfloat **invDiagA){

  mesh_t *mesh = elliptic->mesh;
  setupAide options = elliptic->options;

  // surface mass matrices MS = MM*LIFT
  dfloat *MS = (dfloat *) calloc(mesh->Nfaces*mesh->Nfp*mesh->Nfp,sizeof(dfloat));
  for (int f=0;f<mesh->Nfaces;f++) {
    for (int n=0;n<mesh->Nfp;n++) {
      int fn = mesh->faceNodes[f*mesh->Nfp+n];
      for (int m=0;m<mesh->Nfp;m++) {
        dfloat MSnm = 0;
        for (int i=0;i<mesh->Np;i++){
          MSnm += mesh->MM[fn+i*mesh->Np]*mesh->LIFT[i*mesh->Nfp*mesh->Nfaces+f*mesh->Nfp+m];
        }
        MS[m+n*mesh->Nfp + f*mesh->Nfp*mesh->Nfp] = MSnm;
      }
    }
  }

  // build some monolithic basis arrays (for quads and hexes)
  dfloat *B = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat));
  dfloat *Br = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat));
  dfloat *Bs = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat));
  dfloat *Bt = (dfloat*) calloc(mesh->Np*mesh->Np, sizeof(dfloat));

  // tensor-product basis and its r/s derivatives, tabulated at the quad nodes
  if (elliptic->elementType==QUADRILATERALS) {
    int mode = 0;
    for(int nj=0;nj<mesh->N+1;++nj){
      for(int ni=0;ni<mesh->N+1;++ni){
        int node = 0;
        for(int j=0;j<mesh->N+1;++j){
          for(int i=0;i<mesh->N+1;++i){
            if(nj==j && ni==i) B[mode*mesh->Np+node] = 1;
            if(nj==j) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i];
            if(ni==i) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j];
            ++node;
          }
        }
        ++mode;
      }
    }
  }

  // tensor-product basis and its r/s/t derivatives, tabulated at the hex nodes
  if (elliptic->elementType==HEXAHEDRA) {
    int mode = 0;
    for(int nk=0;nk<mesh->N+1;++nk){
      for(int nj=0;nj<mesh->N+1;++nj){
        for(int ni=0;ni<mesh->N+1;++ni){
          int node = 0;
          for(int k=0;k<mesh->N+1;++k){
            for(int j=0;j<mesh->N+1;++j){
              for(int i=0;i<mesh->N+1;++i){
                if(nk==k && nj==j && ni==i) B[mode*mesh->Np+node] = 1;
                if(nj==j && nk==k) Br[mode*mesh->Np+node] = mesh->D[ni+mesh->Nq*i];
                if(ni==i && nk==k) Bs[mode*mesh->Np+node] = mesh->D[nj+mesh->Nq*j];
                if(ni==i && nj==j) Bt[mode*mesh->Np+node] = mesh->D[nk+mesh->Nq*k];
                ++node;
              }
            }
          }
          ++mode;
        }
      }
    }
  }

  dlong diagNnum = mesh->Np*mesh->Nelements;
  dfloat *diagA = (dfloat*) calloc(diagNnum, sizeof(dfloat));

  if(mesh->rank==0) printf("Building diagonal...");fflush(stdout);

  // dispatch on discretization and element type; one diagonal builder call
  // per element, each writing its mesh->Np entries of diagA
  if (options.compareArgs("DISCRETIZATION","IPDG")) {
    switch(elliptic->elementType){
    case TRIANGLES:
      if (options.compareArgs("BASIS","BERN")) {
        #pragma omp parallel for
        for(dlong eM=0;eM<mesh->Nelements;++eM)
          BuildLocalIpdgBBDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np);
      } else {
        #pragma omp parallel for
        for(dlong eM=0;eM<mesh->Nelements;++eM)
          BuildLocalIpdgDiagTri2D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np);
      }
      break;
    case QUADRILATERALS:
      #pragma omp parallel for
      for(dlong eM=0;eM<mesh->Nelements;++eM)
        BuildLocalIpdgDiagQuad2D(elliptic, mesh, lambda, MS, B, Br, Bs, eM, diagA + eM*mesh->Np);
      break;
    case TETRAHEDRA:
      #pragma omp parallel for
      for(dlong eM=0;eM<mesh->Nelements;++eM)
        BuildLocalIpdgDiagTet3D(elliptic, mesh, lambda, MS, eM, diagA + eM*mesh->Np);
      break;
    case HEXAHEDRA:
      #pragma omp parallel for
      for(dlong eM=0;eM<mesh->Nelements;++eM)
        BuildLocalIpdgDiagHex3D(elliptic, mesh, lambda, MS, B, Br, Bs, Bt, eM, diagA + eM*mesh->Np);
      break;
    }
  } else if (options.compareArgs("DISCRETIZATION","CONTINUOUS")) {
    switch(elliptic->elementType){
    case TRIANGLES:
      #pragma omp parallel for
      for(dlong eM=0;eM<mesh->Nelements;++eM)
        BuildLocalContinuousDiagTri2D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np);
      break;
    case QUADRILATERALS:
      #pragma omp parallel for
      for(dlong eM=0;eM<mesh->Nelements;++eM)
        BuildLocalContinuousDiagQuad2D(elliptic, mesh, lambda, eM, B, Br, Bs, diagA + eM*mesh->Np);
      break;
    case TETRAHEDRA:
      #pragma omp parallel for
      for(dlong eM=0;eM<mesh->Nelements;++eM)
        BuildLocalContinuousDiagTet3D(elliptic, mesh, lambda, eM, diagA + eM*mesh->Np);
      break;
    case HEXAHEDRA:
      #pragma omp parallel for
      for(dlong eM=0;eM<mesh->Nelements;++eM)
        BuildLocalContinuousDiagHex3D(elliptic, mesh, lambda, eM, B, Br, Bs, Bt, diagA + eM*mesh->Np);
      break;
    }
  }

  // C0 discretization: assemble (sum) the diagonal contributions of shared nodes
  if (options.compareArgs("DISCRETIZATION","CONTINUOUS"))
    gsParallelGatherScatter(mesh->hostGsh, diagA, dfloatString, "add");

  // invert the diagonal entrywise (masked C0 nodes were set to 1 by the builders,
  // so no entry should be zero here)
  *invDiagA = (dfloat*) calloc(diagNnum, sizeof(dfloat));
  for (dlong n=0;n<mesh->Nelements*mesh->Np;n++) {
    (*invDiagA)[n] = 1/diagA[n];
  }

  if(mesh->rank==0) printf("done.\n");

  free(diagA);
  free(MS);
  free(B);
  free(Br);
  free(Bs);
  free(Bt);
}

/*
 * Diagonal of the IPDG operator for one triangle eM:
 * volume stiffness + mass terms, plus the penalty and consistency
 * surface terms of the interior-penalty DG formulation.
 */
void BuildLocalIpdgDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) {

  // constant (per-element) geometric factors for this affine triangle
  dlong vbase = eM*mesh->Nvgeo;
  dfloat drdx = mesh->vgeo[vbase+RXID];
  dfloat drdy = mesh->vgeo[vbase+RYID];
  dfloat dsdx = mesh->vgeo[vbase+SXID];
  dfloat dsdy = mesh->vgeo[vbase+SYID];
  dfloat J = mesh->vgeo[vbase+JID];

  /* start with stiffness matrix */
  for(int n=0;n<mesh->Np;++n){
    A[n] = J*lambda*mesh->MM[n*mesh->Np+n];
    A[n] += J*drdx*drdx*mesh->Srr[n*mesh->Np+n];
    A[n] += J*drdx*dsdx*mesh->Srs[n*mesh->Np+n];
    A[n] += J*dsdx*drdx*mesh->Ssr[n*mesh->Np+n];
    A[n] += J*dsdx*dsdx*mesh->Sss[n*mesh->Np+n];
    A[n] += J*drdy*drdy*mesh->Srr[n*mesh->Np+n];
    A[n] += J*drdy*dsdy*mesh->Srs[n*mesh->Np+n];
    A[n] += J*dsdy*drdy*mesh->Ssr[n*mesh->Np+n];
    A[n] += J*dsdy*dsdy*mesh->Sss[n*mesh->Np+n];
  }

  //add the rank boost for the allNeumann Poisson problem
  if (elliptic->allNeumann) {
    for(int n=0;n<mesh->Np;++n){
      A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale;
    }
  }

  for (int fM=0;fM<mesh->Nfaces;fM++) {
    // load surface geofactors for this face
    dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM);
    dfloat nx = mesh->sgeo[sid+NXID];
    dfloat ny = mesh->sgeo[sid+NYID];
    dfloat sJ = mesh->sgeo[sid+SJID];
    dfloat hinv = mesh->sgeo[sid+IHID];

    int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag

    dfloat penalty = elliptic->tau*hinv;

    int bcD = 0, bcN =0;
    int bcType = 0;

    if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann)

    // this needs to be double checked (and the code where these are used)
    if(bcType==1){ // Dirichlet
      bcD = 1;
      bcN = 0;
    } else if(bcType==2){ // Neumann
      bcD = 0;
      bcN = 1;
    }

    // mass matrix for this face
    dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp;

    // penalty term just involves face nodes
    for(int n=0;n<mesh->Nfp;++n){
      int nM = mesh->faceNodes[fM*mesh->Nfp+n];
      for(int m=0;m<mesh->Nfp;++m){
        int mM = mesh->faceNodes[fM*mesh->Nfp+m];
        if (mM == nM) {
          // OP11 = OP11 + 0.5*( gtau*mmE )
          dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m];
          A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm;
        }
      }
    }

    // now add differential surface terms
    for(int n=0;n<mesh->Nfp;++n){
      int nM = mesh->faceNodes[fM*mesh->Nfp+n];
      for(int i=0;i<mesh->Nfp;++i){
        int iM = mesh->faceNodes[fM*mesh->Nfp+i];
        dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in
        dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM];
        dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM];
        // OP11 = OP11 + 0.5*( - mmE*Dn1)
        A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim;
        A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim;
      }
    }

    // transpose consistency term, restricted to the diagonal (mM == n)
    for(int n=0;n<mesh->Np;++n){
      for(int m=0;m<mesh->Nfp;++m){
        int mM = mesh->faceNodes[fM*mesh->Nfp+m];
        if (mM==n) {
          for(int i=0;i<mesh->Nfp;++i){
            int iM = mesh->faceNodes[fM*mesh->Nfp+i];
            dfloat MSfim = sJ*MSf[i*mesh->Nfp+m];
            dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n];
            dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n];
            // OP11 = OP11 + (- Dn1'*mmE );
            A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim;
            A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim;
          }
        }
      }
    }
  }
}

// full-patch builder implemented elsewhere; used by the BB diagonal below
void BuildLocalIpdgPatchAxTri2D(elliptic_t* elliptic, mesh_t* mesh, int basisNp, dfloat *basis, dfloat lambda, dfloat *MS, dlong eM, dfloat *A);

//generate the BB diagonal by extracting it from the transformed patch
void BuildLocalIpdgBBDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) {

  // build the full Np x Np element matrix in the Bernstein-Bezier basis,
  // then keep only its diagonal
  dfloat *patchA = (dfloat *) calloc(mesh->Np*mesh->Np,sizeof(dfloat));

  int basisNp = mesh->Np;
  dfloat *basis = mesh->VB;

  BuildLocalIpdgPatchAxTri2D(elliptic, mesh, basisNp, basis, lambda, MS, eM, patchA);

  for(int n=0;n<mesh->Np;++n) {
    A[n] = patchA[n*mesh->Np+n]; //store the diagonal entry
  }

  free(patchA);
}

//returns the continuous C0 patch A matrix for element eM
void BuildLocalContinuousDiagTri2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *A) {

  // per-element geometric factors of the weak Laplacian
  dlong gbase = eM*mesh->Nggeo;
  dfloat Grr = mesh->ggeo[gbase + G00ID];
  dfloat Grs = mesh->ggeo[gbase + G01ID];
  dfloat Gss = mesh->ggeo[gbase + G11ID];
  dfloat J = mesh->ggeo[gbase + GWJID];

  /* start with stiffness matrix */
  for(int n=0;n<mesh->Np;++n){
    if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes
      A[n] = J*lambda*mesh->MM[n+n*mesh->Np];
      A[n] += Grr*mesh->Srr[n+n*mesh->Np];
      A[n] += Grs*mesh->Srs[n+n*mesh->Np];
      A[n] += Grs*mesh->Ssr[n+n*mesh->Np];
      A[n] += Gss*mesh->Sss[n+n*mesh->Np];
    } else {
      A[n] = 1; //just put a 1 so A is invertable
    }
  }

  //add the rank boost for the allNeumann Poisson problem
  if (elliptic->allNeumann) {
    for(int n=0;n<mesh->Np;++n){
      if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes
        A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale;
      }
    }
  }
}

/*
 * Diagonal of the IPDG operator for one quadrilateral eM, using the
 * tabulated tensor-product basis B and derivatives Br, Bs (variable
 * geometric factors: the quadrature runs over all Np nodes).
 */
void BuildLocalIpdgDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dlong eM, dfloat *A) {

  /* start with stiffness matrix */
  for(int n=0;n<mesh->Np;++n){
    A[n] = 0;

    // (grad phi_n, grad phi_m)_{D^e}
    for(int i=0;i<mesh->Np;++i){
      dlong base = eM*mesh->Np*mesh->Nvgeo + i;
      dfloat drdx = mesh->vgeo[base+mesh->Np*RXID];
      dfloat drdy = mesh->vgeo[base+mesh->Np*RYID];
      dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID];
      dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID];
      dfloat JW = mesh->vgeo[base+mesh->Np*JWID];

      int idn = n*mesh->Np+i;
      dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn];
      dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn];
      A[n] += JW*(dlndx*dlndx+dlndy*dlndy);
      A[n] += lambda*JW*B[idn]*B[idn];
    }

    for (int fM=0;fM<mesh->Nfaces;fM++) {
      // accumulate flux terms for negative and positive traces
      for(int i=0;i<mesh->Nfp;++i){
        int vidM = mesh->faceNodes[i+fM*mesh->Nfp];

        // grab vol geofacs at surface nodes
        dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM;
        dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID];
        dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID];
        dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID];
        dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID];

        // grab surface geometric factors
        dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i);
        dfloat nx = mesh->sgeo[base+NXID];
        dfloat ny = mesh->sgeo[base+NYID];
        dfloat wsJ = mesh->sgeo[base+WSJID];
        dfloat hinv = mesh->sgeo[base+IHID];

        // form negative trace terms in IPDG
        int idnM = n*mesh->Np+vidM;
        dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM];
        dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM];
        dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM;
        dfloat lnM = B[idnM];

        dfloat penalty = elliptic->tau*hinv;
        int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag

        int bcD = 0, bcN =0;
        int bcType = 0;

        if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann)

        // this needs to be double checked (and the code where these are used)
        if(bcType==1){ // Dirichlet
          bcD = 1;
          bcN = 0;
        } else if(bcType==2){ // Neumann
          bcD = 0;
          bcN = 1;
        }

        A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-)
        A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-)
        A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-)
      }
    }
  }
}

/*
 * Diagonal of the continuous operator for one quadrilateral eM,
 * exploiting the tensor-product structure (1D derivative matrix mesh->D).
 */
void BuildLocalContinuousDiagQuad2D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat* Bs, dfloat *A) {

  for (int ny=0;ny<mesh->Nq;ny++) {
    for (int nx=0;nx<mesh->Nq;nx++) {
      int iid = nx+ny*mesh->Nq;
      if (elliptic->mapB[nx+ny*mesh->Nq+eM*mesh->Np]!=1) {
        A[iid] = 0;
        // Grr contribution: sum along the x line of node (nx, ny)
        for (int k=0;k<mesh->Nq;k++) {
          int id = k+ny*mesh->Nq;
          dfloat Grr = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G00ID*mesh->Np];
          A[iid] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq];
        }
        // Gss contribution: sum along the y line of node (nx, ny)
        for (int k=0;k<mesh->Nq;k++) {
          int id = nx+k*mesh->Nq;
          dfloat Gss = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G11ID*mesh->Np];
          A[iid] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq];
        }
        // cross (Grs) and mass (JW*lambda) contributions at the node itself
        int id = nx+ny*mesh->Nq;
        dfloat Grs = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + G01ID*mesh->Np];
        A[iid] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq];

        dfloat JW = mesh->ggeo[eM*mesh->Np*mesh->Nggeo + id + GWJID*mesh->Np];
        A[iid] += JW*lambda;
      } else {
        A[iid] = 1; //just put a 1 so A is invertable
      }
    }
  }

  //add the rank boost for the allNeumann Poisson problem
  if (elliptic->allNeumann) {
    for(int n=0;n<mesh->Np;++n){
      if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes
        A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale;
      }
    }
  }
}

/*
 * Diagonal of the IPDG operator for one tetrahedron eM (3D analogue of
 * BuildLocalIpdgDiagTri2D, with the extra t direction and z components).
 */
void BuildLocalIpdgDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dlong eM, dfloat *A) {

  // constant (per-element) geometric factors for this affine tet
  dlong vbase = eM*mesh->Nvgeo;
  dfloat drdx = mesh->vgeo[vbase+RXID];
  dfloat drdy = mesh->vgeo[vbase+RYID];
  dfloat drdz = mesh->vgeo[vbase+RZID];
  dfloat dsdx = mesh->vgeo[vbase+SXID];
  dfloat dsdy = mesh->vgeo[vbase+SYID];
  dfloat dsdz = mesh->vgeo[vbase+SZID];
  dfloat dtdx = mesh->vgeo[vbase+TXID];
  dfloat dtdy = mesh->vgeo[vbase+TYID];
  dfloat dtdz = mesh->vgeo[vbase+TZID];
  dfloat J = mesh->vgeo[vbase+JID];

  // metric products G_ij = grad(r_i) . grad(r_j)
  dfloat G00 = drdx*drdx + drdy*drdy + drdz*drdz;
  dfloat G01 = drdx*dsdx + drdy*dsdy + drdz*dsdz;
  dfloat G02 = drdx*dtdx + drdy*dtdy + drdz*dtdz;
  dfloat G10 = dsdx*drdx + dsdy*drdy + dsdz*drdz;
  dfloat G11 = dsdx*dsdx + dsdy*dsdy + dsdz*dsdz;
  dfloat G12 = dsdx*dtdx + dsdy*dtdy + dsdz*dtdz;
  dfloat G20 = dtdx*drdx + dtdy*drdy + dtdz*drdz;
  dfloat G21 = dtdx*dsdx + dtdy*dsdy + dtdz*dsdz;
  dfloat G22 = dtdx*dtdx + dtdy*dtdy + dtdz*dtdz;

  /* start with stiffness matrix */
  for(int n=0;n<mesh->Np;++n){
    A[n] = J*lambda*mesh->MM[n*mesh->Np+n];
    A[n] += J*G00*mesh->Srr[n*mesh->Np+n];
    A[n] += J*G01*mesh->Srs[n*mesh->Np+n];
    A[n] += J*G02*mesh->Srt[n*mesh->Np+n];
    A[n] += J*G10*mesh->Ssr[n*mesh->Np+n];
    A[n] += J*G11*mesh->Sss[n*mesh->Np+n];
    A[n] += J*G12*mesh->Sst[n*mesh->Np+n];
    A[n] += J*G20*mesh->Str[n*mesh->Np+n];
    A[n] += J*G21*mesh->Sts[n*mesh->Np+n];
    A[n] += J*G22*mesh->Stt[n*mesh->Np+n];
  }

  //add the rank boost for the allNeumann Poisson problem
  if (elliptic->allNeumann) {
    for(int n=0;n<mesh->Np;++n){
      A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale;
    }
  }

  for (int fM=0;fM<mesh->Nfaces;fM++) {
    // load surface geofactors for this face
    dlong sid = mesh->Nsgeo*(eM*mesh->Nfaces+fM);
    dfloat nx = mesh->sgeo[sid+NXID];
    dfloat ny = mesh->sgeo[sid+NYID];
    dfloat nz = mesh->sgeo[sid+NZID];
    dfloat sJ = mesh->sgeo[sid+SJID];
    dfloat hinv = mesh->sgeo[sid+IHID];

    int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag

    dfloat penalty = elliptic->tau*hinv;

    int bcD = 0, bcN =0;
    int bcType = 0;

    if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann)

    // this needs to be double checked (and the code where these are used)
    if(bcType==1){ // Dirichlet
      bcD = 1;
      bcN = 0;
    } else if(bcType==2){ // Neumann
      bcD = 0;
      bcN = 1;
    }

    // mass matrix for this face
    dfloat *MSf = MS+fM*mesh->Nfp*mesh->Nfp;

    // penalty term just involves face nodes
    for(int n=0;n<mesh->Nfp;++n){
      for(int m=0;m<mesh->Nfp;++m){
        int nM = mesh->faceNodes[fM*mesh->Nfp+n];
        int mM = mesh->faceNodes[fM*mesh->Nfp+m];
        if (mM==nM) {
          // OP11 = OP11 + 0.5*( gtau*mmE )
          dfloat MSfnm = sJ*MSf[n*mesh->Nfp+m];
          A[nM] += 0.5*(1.-bcN)*(1.+bcD)*penalty*MSfnm;
        }
      }
    }

    // now add differential surface terms
    for(int n=0;n<mesh->Nfp;++n){
      int nM = mesh->faceNodes[fM*mesh->Nfp+n];
      for(int i=0;i<mesh->Nfp;++i){
        int iM = mesh->faceNodes[fM*mesh->Nfp+i];
        dfloat MSfni = sJ*MSf[n*mesh->Nfp+i]; // surface Jacobian built in
        dfloat DxMim = drdx*mesh->Dr[iM*mesh->Np+nM] + dsdx*mesh->Ds[iM*mesh->Np+nM] + dtdx*mesh->Dt[iM*mesh->Np+nM];
        dfloat DyMim = drdy*mesh->Dr[iM*mesh->Np+nM] + dsdy*mesh->Ds[iM*mesh->Np+nM] + dtdy*mesh->Dt[iM*mesh->Np+nM];
        dfloat DzMim = drdz*mesh->Dr[iM*mesh->Np+nM] + dsdz*mesh->Ds[iM*mesh->Np+nM] + dtdz*mesh->Dt[iM*mesh->Np+nM];
        // OP11 = OP11 + 0.5*( - mmE*Dn1)
        A[nM] += -0.5*nx*(1+bcD)*(1-bcN)*MSfni*DxMim;
        A[nM] += -0.5*ny*(1+bcD)*(1-bcN)*MSfni*DyMim;
        A[nM] += -0.5*nz*(1+bcD)*(1-bcN)*MSfni*DzMim;
      }
    }

    // transpose consistency term, restricted to the diagonal (mM == n)
    for(int n=0;n<mesh->Np;++n){
      for(int m=0;m<mesh->Nfp;++m){
        int mM = mesh->faceNodes[fM*mesh->Nfp+m];
        if (mM==n) {
          for(int i=0;i<mesh->Nfp;++i){
            int iM = mesh->faceNodes[fM*mesh->Nfp+i];
            dfloat MSfim = sJ*MSf[i*mesh->Nfp+m];
            dfloat DxMin = drdx*mesh->Dr[iM*mesh->Np+n] + dsdx*mesh->Ds[iM*mesh->Np+n] + dtdx*mesh->Dt[iM*mesh->Np+n];
            dfloat DyMin = drdy*mesh->Dr[iM*mesh->Np+n] + dsdy*mesh->Ds[iM*mesh->Np+n] + dtdy*mesh->Dt[iM*mesh->Np+n];
            dfloat DzMin = drdz*mesh->Dr[iM*mesh->Np+n] + dsdz*mesh->Ds[iM*mesh->Np+n] + dtdz*mesh->Dt[iM*mesh->Np+n];
            // OP11 = OP11 + (- Dn1'*mmE );
            A[n] += -0.5*nx*(1+bcD)*(1-bcN)*DxMin*MSfim;
            A[n] += -0.5*ny*(1+bcD)*(1-bcN)*DyMin*MSfim;
            A[n] += -0.5*nz*(1+bcD)*(1-bcN)*DzMin*MSfim;
          }
        }
      }
    }
  }
}

void BuildLocalContinuousDiagTet3D(elliptic_t* elliptic, mesh_t *mesh, dfloat
lambda, dlong eM, dfloat *A) { dlong gbase = eM*mesh->Nggeo; dfloat Grr = mesh->ggeo[gbase + G00ID]; dfloat Grs = mesh->ggeo[gbase + G01ID]; dfloat Grt = mesh->ggeo[gbase + G02ID]; dfloat Gss = mesh->ggeo[gbase + G11ID]; dfloat Gst = mesh->ggeo[gbase + G12ID]; dfloat Gtt = mesh->ggeo[gbase + G22ID]; dfloat J = mesh->ggeo[gbase + GWJID]; /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] = J*lambda*mesh->MM[n+n*mesh->Np]; A[n] += Grr*mesh->Srr[n+n*mesh->Np]; A[n] += Grs*mesh->Srs[n+n*mesh->Np]; A[n] += Grt*mesh->Srt[n+n*mesh->Np]; A[n] += Grs*mesh->Ssr[n+n*mesh->Np]; A[n] += Gss*mesh->Sss[n+n*mesh->Np]; A[n] += Gst*mesh->Sst[n+n*mesh->Np]; A[n] += Grt*mesh->Str[n+n*mesh->Np]; A[n] += Gst*mesh->Sts[n+n*mesh->Np]; A[n] += Gtt*mesh->Stt[n+n*mesh->Np]; } else { A[n] = 1; //just put a 1 so A is invertable } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } } void BuildLocalIpdgDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dfloat *MS, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dlong eM, dfloat *A) { /* start with stiffness matrix */ for(int n=0;n<mesh->Np;++n){ A[n] = 0; // (grad phi_n, grad phi_m)_{D^e} for(int i=0;i<mesh->Np;++i){ dlong base = eM*mesh->Np*mesh->Nvgeo + i; dfloat drdx = mesh->vgeo[base+mesh->Np*RXID]; dfloat drdy = mesh->vgeo[base+mesh->Np*RYID]; dfloat drdz = mesh->vgeo[base+mesh->Np*RZID]; dfloat dsdx = mesh->vgeo[base+mesh->Np*SXID]; dfloat dsdy = mesh->vgeo[base+mesh->Np*SYID]; dfloat dsdz = mesh->vgeo[base+mesh->Np*SZID]; dfloat dtdx = mesh->vgeo[base+mesh->Np*TXID]; dfloat dtdy = mesh->vgeo[base+mesh->Np*TYID]; dfloat dtdz = mesh->vgeo[base+mesh->Np*TZID]; dfloat JW = mesh->vgeo[base+mesh->Np*JWID]; int idn = 
n*mesh->Np+i; dfloat dlndx = drdx*Br[idn] + dsdx*Bs[idn] + dtdx*Bt[idn]; dfloat dlndy = drdy*Br[idn] + dsdy*Bs[idn] + dtdy*Bt[idn]; dfloat dlndz = drdz*Br[idn] + dsdz*Bs[idn] + dtdz*Bt[idn]; A[n] += JW*(dlndx*dlndx+dlndy*dlndy+dlndz*dlndz); A[n] += lambda*JW*B[idn]*B[idn]; } for (int fM=0;fM<mesh->Nfaces;fM++) { // accumulate flux terms for negative and positive traces for(int i=0;i<mesh->Nfp;++i){ int vidM = mesh->faceNodes[i+fM*mesh->Nfp]; // grab vol geofacs at surface nodes dlong baseM = eM*mesh->Np*mesh->Nvgeo + vidM; dfloat drdxM = mesh->vgeo[baseM+mesh->Np*RXID]; dfloat drdyM = mesh->vgeo[baseM+mesh->Np*RYID]; dfloat drdzM = mesh->vgeo[baseM+mesh->Np*RZID]; dfloat dsdxM = mesh->vgeo[baseM+mesh->Np*SXID]; dfloat dsdyM = mesh->vgeo[baseM+mesh->Np*SYID]; dfloat dsdzM = mesh->vgeo[baseM+mesh->Np*SZID]; dfloat dtdxM = mesh->vgeo[baseM+mesh->Np*TXID]; dfloat dtdyM = mesh->vgeo[baseM+mesh->Np*TYID]; dfloat dtdzM = mesh->vgeo[baseM+mesh->Np*TZID]; // grab surface geometric factors dlong base = mesh->Nsgeo*(eM*mesh->Nfp*mesh->Nfaces + fM*mesh->Nfp + i); dfloat nx = mesh->sgeo[base+NXID]; dfloat ny = mesh->sgeo[base+NYID]; dfloat nz = mesh->sgeo[base+NZID]; dfloat wsJ = mesh->sgeo[base+WSJID]; dfloat hinv = mesh->sgeo[base+IHID]; // form negative trace terms in IPDG int idnM = n*mesh->Np+vidM; dfloat dlndxM = drdxM*Br[idnM] + dsdxM*Bs[idnM] + dtdxM*Bt[idnM]; dfloat dlndyM = drdyM*Br[idnM] + dsdyM*Bs[idnM] + dtdyM*Bt[idnM]; dfloat dlndzM = drdzM*Br[idnM] + dsdzM*Bs[idnM] + dtdzM*Bt[idnM]; dfloat ndotgradlnM = nx*dlndxM+ny*dlndyM+nz*dlndzM; dfloat lnM = B[idnM]; dfloat penalty = elliptic->tau*hinv; int bc = mesh->EToB[fM+mesh->Nfaces*eM]; //raw boundary flag int bcD = 0, bcN =0; int bcType = 0; if(bc>0) bcType = elliptic->BCType[bc]; //find its type (Dirichlet/Neumann) // this needs to be double checked (and the code where these are used) if(bcType==1){ // Dirichlet bcD = 1; bcN = 0; } else if(bcType==2){ // Neumann bcD = 0; bcN = 1; } A[n] += 
-0.5*(1+bcD)*(1-bcN)*wsJ*lnM*ndotgradlnM; // -(ln^-, N.grad lm^-) A[n] += -0.5*(1+bcD)*(1-bcN)*wsJ*ndotgradlnM*lnM; // -(N.grad ln^-, lm^-) A[n] += +0.5*(1+bcD)*(1-bcN)*wsJ*penalty*lnM*lnM; // +((tau/h)*ln^-,lm^-) } } } } void BuildLocalContinuousDiagHex3D(elliptic_t* elliptic, mesh_t *mesh, dfloat lambda, dlong eM, dfloat *B, dfloat *Br, dfloat *Bs, dfloat *Bt, dfloat *A) { for (int nz=0;nz<mesh->Nq;nz++) { for (int ny=0;ny<mesh->Nq;ny++) { for (int nx=0;nx<mesh->Nq;nx++) { int idn = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; if (elliptic->mapB[idn+eM*mesh->Np]!=1) { A[idn] = 0; int id = nx+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dlong base = eM*mesh->Np*mesh->Nggeo; dfloat Grs = mesh->ggeo[base + id + G01ID*mesh->Np]; A[idn] += 2*Grs*mesh->D[nx+nx*mesh->Nq]*mesh->D[ny+ny*mesh->Nq]; dfloat Grt = mesh->ggeo[base + id + G02ID*mesh->Np]; A[idn] += 2*Grt*mesh->D[nx+nx*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; dfloat Gst = mesh->ggeo[base + id + G12ID*mesh->Np]; A[idn] += 2*Gst*mesh->D[ny+ny*mesh->Nq]*mesh->D[nz+nz*mesh->Nq]; for (int k=0;k<mesh->Nq;k++) { int iid = k+ny*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Grr = mesh->ggeo[base + iid + G00ID*mesh->Np]; A[idn] += Grr*mesh->D[nx+k*mesh->Nq]*mesh->D[nx+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+k*mesh->Nq+nz*mesh->Nq*mesh->Nq; dfloat Gss = mesh->ggeo[base + iid + G11ID*mesh->Np]; A[idn] += Gss*mesh->D[ny+k*mesh->Nq]*mesh->D[ny+k*mesh->Nq]; } for (int k=0;k<mesh->Nq;k++) { int iid = nx+ny*mesh->Nq+k*mesh->Nq*mesh->Nq; dfloat Gtt = mesh->ggeo[base + iid + G22ID*mesh->Np]; A[idn] += Gtt*mesh->D[nz+k*mesh->Nq]*mesh->D[nz+k*mesh->Nq]; } dfloat JW = mesh->ggeo[base + id + GWJID*mesh->Np]; A[idn] += JW*lambda; } else { A[idn] = 1; //just put a 1 so A is invertable } } } } //add the rank boost for the allNeumann Poisson problem if (elliptic->allNeumann) { for(int n=0;n<mesh->Np;++n){ if (elliptic->mapB[n+eM*mesh->Np]!=1) { //dont fill rows for masked nodes A[n] += 
elliptic->allNeumannPenalty*elliptic->allNeumannScale*elliptic->allNeumannScale; } } } }
apply_bcs_curvilinear.h
// Declare boundary condition BC_UPDATE_OUTER macro, // which updates a single outer boundary face // of the 3D grid cube using quadratic polynomial // extrapolation. #define BC_UPDATE_OUTER(which_gf, i0,i1,i2, FACEX0,FACEX1,FACEX2) { \ const int idx3 = IDX3S(i0,i1,i2); \ gfs[IDX4S(which_gf,i0,i1,i2)] = \ +3.0*gfs[IDX4S(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \ -3.0*gfs[IDX4S(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \ +1.0*gfs[IDX4S(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \ } // Curvilinear boundary condition driver routine: Apply BCs to all six // boundary faces of the 3D numerical domain, filling in the // innermost ghost zone layer first, and moving outward. void apply_bcs_curvilinear_single_gf(const paramstruct *restrict params, const bc_struct *restrict bcstruct, const int8_t *restrict gfs_parity, const int which_gf, REAL *restrict gfs) { #include "RELATIVE_PATH__set_Cparameters.h" /* Header file containing correct #include for set_Cparameters.h; * accounting for the relative path */ for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) { // First apply OUTER boundary conditions, // in case an INNER (parity) boundary point // needs data at the outer boundary: // After updating each face, adjust imin[] and imax[] // to reflect the newly-updated face extents. 
for(int pt=0;pt<bcstruct->num_ob_gz_pts[which_gz];pt++) { BC_UPDATE_OUTER(which_gf, bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i0, bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i1, bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i2, bcstruct->outer[which_gz][pt].FACEi0, bcstruct->outer[which_gz][pt].FACEi1, bcstruct->outer[which_gz][pt].FACEi2); } // Then apply INNER (parity) boundary conditions: for(int pt=0;pt<bcstruct->num_ib_gz_pts[which_gz];pt++) { const int i0dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i0; const int i1dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i1; const int i2dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i2; const int i0src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i0; const int i1src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i1; const int i2src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i2; const int8_t *prty= bcstruct->inner[which_gz][pt].parity; // printf("%d\n",bcstruct->inner_bc_parity[which_gz][pt].parity[gfs_parity[which_gf]]); gfs[IDX4S(which_gf,i0dest,i1dest,i2dest)] = bcstruct->inner[which_gz][pt].parity[gfs_parity[which_gf]] * gfs[IDX4S(which_gf, i0src,i1src,i2src)]; } // END for(int pt=0;pt<num_ib_gz_pts[which_gz];pt++) } // END for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) } // END function void apply_bcs_curvilinear(const paramstruct *restrict params, const bc_struct *restrict bcstruct, const int NUM_GFS, const int8_t *restrict gfs_parity, REAL *restrict gfs) { #pragma omp parallel for for(int which_gf=0;which_gf<NUM_GFS;which_gf++) { apply_bcs_curvilinear_single_gf(params, bcstruct, gfs_parity, which_gf, gfs); } // END for(int which_gf=0;which_gf<NUM_GFS;which_gf++) } // END function
static.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>

/*
 * Demo of OpenMP loop scheduling.
 *
 * "schedule" selects the scheme with which the threads distribute the work of
 * the iterations of a loop.  "static" means that iteration blocks are mapped
 * statically to the execution threads in a round-robin fashion.  The nice
 * thing with static scheduling is that the OpenMP run-time guarantees that if
 * you have two separate loops with the same number of iterations, and execute
 * them with the same number of threads using static scheduling, then each
 * thread receives exactly the same iteration range in both loops.
 */
int main() {
    /* The loop variable is declared inside the for statement, so it is
     * implicitly private to each thread; no private() clause is needed.
     * (The unused n_threads variable from the original was removed.) */
    #pragma omp parallel for schedule(static) num_threads(4)
    for (int i = 0; i < 16; i++) {
        // wait i seconds so the round-robin iteration assignment is easy to observe
        sleep(i);
        printf("The thread %d has completed the iteration %d\n", omp_get_thread_num(), i);
    }

    printf("All threads have ended!\n");
    return 0;
}
app.c
/** * Christina Giannoula * cgiannoula: christina.giann@gmail.com */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <dpu.h> #include <dpu_log.h> #include <unistd.h> #include <getopt.h> #include <assert.h> #include <omp.h> #include <math.h> #include "../support/common.h" #include "../support/matrix.h" #include "../support/params.h" #include "../support/partition.h" #include "../support/timer.h" #include "../support/utils.h" // Define the DPU Binary path as DPU_BINARY here. #ifndef DPU_BINARY #define DPU_BINARY "./bin/spmv_dpu" #endif #define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB /* * Main Structures: * 1. Matrices * 2. Input vector * 3. Output vector * 4. Help structures for data partitioning */ static struct BDCOOMatrix* A; static struct COOMatrix* B; static val_dt* x; static val_dt* y; static val_dt* z; static struct partition_info_t *part_info; /** * @brief Specific information for each DPU */ struct dpu_info_t { uint32_t rows_per_dpu; uint32_t cols_per_dpu; uint32_t rows_per_dpu_pad; uint32_t prev_rows_dpu; uint32_t prev_nnz_dpu; uint32_t nnz; uint32_t nnz_pad; }; struct dpu_info_t *dpu_info; /** * @brief find the dpus_per_row_partition * @param factor n to create partitions * @param column_partitions to create vert_partitions * @param horz_partitions to return the 2D partitioning */ void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) { uint32_t dpus_per_vert_partition = n / vert_partitions; *horz_partitions = dpus_per_vert_partition; } /** * @brief initialize input vector * @param pointer to input vector and vector size */ void init_vector(val_dt* vec, uint32_t size) { for(unsigned int i = 0; i < size; ++i) { vec[i] = (val_dt) (i%4+1); } } /** * @brief compute output in the host CPU */ static void spmv_host(val_dt* y, struct BDCOOMatrix *A, val_dt* x) { uint64_t total_nnzs = 0; for (uint32_t c = 0; c < A->vert_partitions; c++) { uint32_t col_offset = 
A->vert_tile_widths[c]; for(uint32_t n = 0; n < A->nnzs_per_vert_partition[c]; n++) { uint32_t rowIndx = A->nnzs[total_nnzs].rowind; uint32_t colIndx = A->nnzs[total_nnzs].colind; val_dt value = A->nnzs[total_nnzs++].val; y[rowIndx] += (value * x[col_offset + colIndx]); } } } /** * @brief main of the host application */ int main(int argc, char **argv) { struct Params p = input_params(argc, argv); struct dpu_set_t dpu_set, dpu; uint32_t nr_of_dpus; uint32_t nr_of_ranks; // Allocate DPUs and load binary DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set)); DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL)); DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus)); DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks)); printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus); printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks); printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS); unsigned int i; // Initialize input data B = readCOOMatrix(p.fileName); sortCOOMatrix(B); uint32_t horz_partitions = 0; uint32_t vert_partitions = p.vert_partitions; find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions); printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions); A = coo2bdcoo(B, horz_partitions, vert_partitions); freeCOOMatrix(B); // Initialize partition data part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS); #if FG_TRANS struct dpu_set_t rank; uint32_t each_rank; DPU_RANK_FOREACH(dpu_set, rank, each_rank){ uint32_t nr_dpus_in_rank; DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank)); part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank; } int sum = 0; for(int i=0; i < p.max_nranks+1; i++) { part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum; sum += part_info->active_dpus_per_rank[i]; } #endif // Initialize help data - Padding needed uint32_t ncols_pad = A->vert_tile_widths[A->vert_partitions-1] + A->max_tile_width; uint32_t tile_width_pad = A->max_tile_width; uint32_t nrows_pad = A->nrows; if 
(ncols_pad % (8 / byte_dt) != 0) ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt))); if (tile_width_pad % (8 / byte_dt) != 0) tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt))); if (nrows_pad % (8 / byte_dt) != 0) nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt))); // Allocate input vector x = (val_dt *) malloc(ncols_pad * sizeof(val_dt)); // Allocate output vector z = (val_dt *) calloc(nrows_pad, sizeof(val_dt)); // Initialize input vector with arbitrary data init_vector(x, ncols_pad); // Load-balance nnzs among DPUs of the same vertical partition partition_by_nnz(A, part_info); // Initialize help data dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t)); dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t)); // Max limits for parallel transfers uint64_t max_rows_per_dpu = 0; uint64_t max_nnz_per_dpu = 0; // Timer for measurements Timer timer; uint64_t total_nnzs = 0; i = 0; DPU_FOREACH(dpu_set, dpu, i) { // Find padding for rows and non-zero elements needed for CPU-DPU transfers uint32_t tile_horz_indx = i % A->horz_partitions; uint32_t tile_vert_indx = i / A->horz_partitions; uint32_t rows_per_dpu = part_info->row_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx + 1] - part_info->row_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx]; uint32_t prev_rows_dpu = part_info->row_split[tile_vert_indx * (2 * A->horz_partitions) + 2 * tile_horz_indx]; if (rows_per_dpu > max_rows_per_dpu) max_rows_per_dpu = rows_per_dpu; uint32_t rows_per_dpu_pad = rows_per_dpu; if (rows_per_dpu_pad % (8 / byte_dt) != 0) rows_per_dpu_pad += ((8 / byte_dt) - (rows_per_dpu_pad % (8 / byte_dt))); unsigned int nnz=0, nnz_pad; nnz = part_info->nnz_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->nnz_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx]; if (nnz % (8 / 
byte_dt) != 0) nnz_pad = nnz + ((8 / byte_dt) - (nnz % (8 / byte_dt))); else nnz_pad = nnz; if (nnz_pad > max_nnz_per_dpu) max_nnz_per_dpu = nnz_pad; uint32_t prev_nnz_dpu = total_nnzs; total_nnzs += nnz; // Keep information per DPU dpu_info[i].rows_per_dpu = rows_per_dpu; dpu_info[i].rows_per_dpu_pad = rows_per_dpu_pad; dpu_info[i].prev_rows_dpu = prev_rows_dpu; dpu_info[i].cols_per_dpu = A->vert_tile_widths[tile_vert_indx+1] - A->vert_tile_widths[tile_vert_indx]; dpu_info[i].prev_nnz_dpu = prev_nnz_dpu; dpu_info[i].nnz = nnz; dpu_info[i].nnz_pad = nnz_pad; // Find input arguments per DPU input_args[i].nrows = rows_per_dpu; input_args[i].tcols = tile_width_pad; input_args[i].tstart_row = prev_rows_dpu; // Load-balance nnzs across tasklets of a DPU for(unsigned int tasklet_id=0; tasklet_id < NR_TASKLETS; tasklet_id++) { uint32_t nnz_chunks = nnz / NR_TASKLETS; uint32_t rest_nnzs = nnz % NR_TASKLETS; uint32_t nnz_per_tasklet = nnz_chunks; uint32_t prev_nnz; if (tasklet_id < rest_nnzs) nnz_per_tasklet++; if (rest_nnzs > 0) { if (tasklet_id >= rest_nnzs) prev_nnz = rest_nnzs * (nnz_chunks + 1) + (tasklet_id - rest_nnzs) * nnz_chunks; else prev_nnz = tasklet_id * (nnz_chunks + 1); } else { prev_nnz = tasklet_id * nnz_chunks; } // Find input arguments per tasklet input_args[i].start_nnz[tasklet_id] = prev_nnz; input_args[i].nnz_per_tasklet[tasklet_id] = nnz_per_tasklet; } } #if FG_TRANS // Find max number of rows and columns (subset of elements of the output vector) among DPUs of each rank DPU_RANK_FOREACH(dpu_set, rank, each_rank){ uint32_t max_rows_cur_rank = 0; uint32_t max_cols_cur_rank = 0; uint32_t nr_dpus_in_rank; DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank)); uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank]; for (uint32_t k = 0; k < nr_dpus_in_rank; k++) { if (start_dpu + k >= nr_of_dpus) break; if (dpu_info[start_dpu + k].rows_per_dpu > max_rows_cur_rank) max_rows_cur_rank = dpu_info[start_dpu + k].rows_per_dpu; if (dpu_info[start_dpu + 
k].cols_per_dpu > max_cols_cur_rank) max_cols_cur_rank = dpu_info[start_dpu + k].cols_per_dpu; } if (max_rows_cur_rank % (8 / byte_dt) != 0) max_rows_cur_rank += ((8 / byte_dt) - (max_rows_cur_rank % (8 / byte_dt))); if (max_cols_cur_rank % (8 / byte_dt) != 0) max_cols_cur_rank += ((8 / byte_dt) - (max_cols_cur_rank % (8 / byte_dt))); part_info->max_rows_per_rank[each_rank] = (uint32_t) max_rows_cur_rank; part_info->max_cols_per_rank[each_rank] = (uint32_t) max_cols_cur_rank; } #endif // Initializations for parallel transfers with padding needed if (max_rows_per_dpu % (8 / byte_dt) != 0) max_rows_per_dpu += ((8 / byte_dt) - (max_rows_per_dpu % (8 / byte_dt))); if (max_nnz_per_dpu % (8 / byte_dt) != 0) max_nnz_per_dpu += ((8 / byte_dt) - (max_nnz_per_dpu % (8 / byte_dt))); // Re-allocations for padding needed A->nnzs = (struct elem_t *) realloc(A->nnzs, (dpu_info[nr_of_dpus-1].prev_nnz_dpu + max_nnz_per_dpu) * sizeof(struct elem_t)); y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus) * ((uint64_t) max_rows_per_dpu), sizeof(val_dt)); // Count total number of bytes to be transfered in MRAM of DPU unsigned long int total_bytes; total_bytes = ((max_nnz_per_dpu) * sizeof(struct elem_t)) + (tile_width_pad * sizeof(val_dt)) + (max_rows_per_dpu * sizeof(val_dt)); assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size"); // Copy input arguments to DPUs i = 0; DPU_FOREACH(dpu_set, dpu, i) { input_args[i].max_rows_per_dpu = max_rows_per_dpu; DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT)); // Copy input matrix to DPUs startTimer(&timer, 0); // Copy Input Array i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->nnzs + dpu_info[i].prev_nnz_dpu)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt) + tile_width_pad * sizeof(val_dt), max_nnz_per_dpu * 
sizeof(struct elem_t), DPU_XFER_DEFAULT)); stopTimer(&timer, 0); // Copy input vector to DPUs startTimer(&timer, 1); #if CG_TRANS // Coarse-grained data transfers in the input vector i = 0; DPU_FOREACH(dpu_set, dpu, i) { uint32_t tile_vert_indx = i / A->horz_partitions; DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx])); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT)); #endif #if FG_TRANS #if YFG_TRANS // Coarse-grained data transfers in the input vector at rank granularity i = 0; DPU_FOREACH(dpu_set, dpu, i) { uint32_t tile_vert_indx = i / A->horz_partitions; DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx])); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT)); #else // Fine-grained data transfers in the input vector at rank granularity i = 0; DPU_FOREACH(dpu_set, dpu, i) { uint32_t tile_vert_indx = i / A->horz_partitions; DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx])); } i = 0; //struct dpu_set_t rank; DPU_RANK_FOREACH(dpu_set, rank) { DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt), part_info->max_cols_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC)); i++; } DPU_ASSERT(dpu_sync(dpu_set)); #endif #endif stopTimer(&timer, 1); // Run kernel on DPUs startTimer(&timer, 2); DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS)); stopTimer(&timer, 2); #if LOG // Display DPU Log (default: disabled) DPU_FOREACH(dpu_set, dpu) { DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout)); } #endif // Retrieve results for output vector from DPUs startTimer(&timer, 3); #if CG_TRANS // Coarse-grained data transfers in the output vector i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * 
max_rows_per_dpu))); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_rows_per_dpu * sizeof(val_dt), DPU_XFER_DEFAULT)); #endif #if FG_TRANS // Fine-grained data transfers in the output vector at rank granularity i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, y + i * max_rows_per_dpu)); } i = 0; DPU_RANK_FOREACH(dpu_set, rank) { DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_rows_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC)); i++; } DPU_ASSERT(dpu_sync(dpu_set)); #endif stopTimer(&timer, 3); // Merge partial results to the host CPU startTimer(&timer, 4); uint32_t r, c, t; for (c = 0; c < A->vert_partitions; c++) { for (r = 0; r < A->horz_partitions; r++) { #pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_rows_per_dpu, c, r) private(t) for (t = 0; t < part_info->row_split[c * (2 * A->horz_partitions) + 2 * r+1] - part_info->row_split[c * (2 * A->horz_partitions) + 2 * r]; t++) { z[part_info->row_split[c * (2 * A->horz_partitions) + 2 * r] + t] += y[(c * A->horz_partitions + r) * max_rows_per_dpu + t]; } } } stopTimer(&timer, 4); // Print timing results printf("\n"); printf("Load Matrix "); printTimer(&timer, 0); printf("Load Input Vector "); printTimer(&timer, 1); printf("Kernel "); printTimer(&timer, 2); printf("Retrieve Output Vector "); printTimer(&timer, 3); printf("Merge Partial Results "); printTimer(&timer, 4); printf("\n\n"); #if CHECK_CORR // Check output startTimer(&timer, 4); val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt)); spmv_host(y_host, A, x); bool status = true; i = 0; for (i = 0; i < A->nrows; i++) { if(y_host[i] != z[i]) { status = false; } } if (status) { printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n"); } else { printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n"); } free(y_host); #endif // Deallocation freeBDCOOMatrix(A); free(x); free(y); free(z); 
partition_free(part_info); DPU_ASSERT(dpu_free(dpu_set)); return 0; }
django_scrypt_fmt_plug.c
/* scrypt cracker patch for JtR. Hacked together during May of 2013 by Dhiru * Kholia <dhiru at openwall.com>. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_django_scrypt; #elif FMT_REGISTERS_H john_register_one(&fmt_django_scrypt); #else #include <string.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "base64.h" #include "escrypt/crypto_scrypt.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #define OMP_SCALE 1 // So slow a format, a multiplier is NOT needed #endif #include "memdbg.h" #define FORMAT_LABEL "django-scrypt" #define FORMAT_NAME "" #define FORMAT_TAG "scrypt" #define TAG_LENGTH 6 #ifdef __XOP__ #define ALGORITHM_NAME "Salsa20/8 128/128 XOP" #elif defined(__AVX__) #define ALGORITHM_NAME "Salsa20/8 128/128 AVX" #elif defined(__SSE2__) #define ALGORITHM_NAME "Salsa20/8 128/128 SSE2" #else #define ALGORITHM_NAME "Salsa20/8 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define BINARY_SIZE 64 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN 4 #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 /* notastrongpassword => scrypt$NBGmaGIXijJW$14$8$1$64$achPt01SbytSt+F3CcCFgEPr96+/j9iCTdejFdAARZ8mzfejrP64TJ5XBJa3gYwuCKOEGlw2E/lWCWS7LeS6CA== */ static struct fmt_tests scrypt_tests[] = { /* https://pypi.python.org/pypi/django-scrypt/ format hashes */ {"scrypt$NBGmaGIXijJW$14$8$1$64$achPt01SbytSt+F3CcCFgEPr96+/j9iCTdejFdAARZ8mzfejrP64TJ5XBJa3gYwuCKOEGlw2E/lWCWS7LeS6CA==", "notastrongpassword"}, {"scrypt$Cj0PzdtT3qS2$14$8$1$64$qn4CDnM8CcIBNrpQXHo6ti8vSUoSXj7GBFy7k1bp5wPs8jKjh/gHZ+qM9uk6LbcVHm02yBaI5WCbDm/Shq/MXA==", 
"realmenuseJtR"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct custom_salt { /* int type; */ // not used (another type probably required a new JtR format) int N; int r; int p; unsigned char salt[32]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int isDigits(char *p) { while (*p && *p != '$') { if (*p <= '0' || *p >= '9') return 0; ++p; } return 1; } static int valid(char *ciphertext, struct fmt_main *self) { char *cp, *cp2; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) return 0; cp = ciphertext + TAG_LENGTH; if (*cp != '$') return 0; ++cp; cp2 = strchr(cp, '$'); if (!cp2) return 0; if (cp2-cp > 32) return 0; cp = &cp2[1]; if (isDigits(cp) == 0) return 0; cp = strchr(cp, '$'); if (!cp) return 0; ++cp; if (isDigits(cp) == 0) return 0; cp = strchr(cp, '$'); if (!cp) return 0; ++cp; if (isDigits(cp) == 0) return 0; cp = strchr(cp, '$'); if (!cp) return 0; ++cp; if (isDigits(cp) == 0) return 0; cp = strchr(cp, '$'); if (!cp) return 0; ++cp; if (strlen(cp) != 88) return 0; return 1; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; /* ensure alignment */ static union { struct custom_salt _cs; ARCH_WORD_32 dummy; } un; static struct custom_salt *cs = &(un._cs); ctcopy += TAG_LENGTH; p = strtok(ctcopy, "$"); strncpy((char*)cs->salt, p, 32); p = strtok(NULL, "$"); cs->N = atoi(p); p = strtok(NULL, "$"); cs->r = atoi(p); p = strtok(NULL, "$"); cs->p = atoi(p); MEM_FREE(keeptr); return (void *)cs; } static void *get_binary(char *ciphertext) { static 
union { unsigned char c[BINARY_SIZE + 1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; p = strrchr(ciphertext, '$') + 1; base64_decode(p, strlen(p), (char*)out); return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { crypto_scrypt((unsigned char*)saved_key[index], strlen((char*)saved_key[index]), cur_salt->salt, strlen((char*)cur_salt->salt), (1ULL) << cur_salt->N, cur_salt->r, cur_salt->p, (unsigned char*)crypt_out[index], BINARY_SIZE); } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void scrypt_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char *get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int tunable_cost_N(void *salt) { static struct custom_salt *my_salt; my_salt 
= salt; return (unsigned int) my_salt->N; } static unsigned int tunable_cost_r(void *salt) { static struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->r; } static unsigned int tunable_cost_p(void *salt) { static struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->p; } #endif struct fmt_main fmt_django_scrypt = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "N", "r", "p" }, #endif scrypt_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { tunable_cost_N, tunable_cost_r, tunable_cost_p }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, scrypt_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
arraybench.c
/*************************************************************************** * * * OpenMP MicroBenchmark Suite - Version 2.0 * * * * produced by * * * * Mark Bull and Fiona Reid * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk * * * * * * This version copyright (c) The University of Edinburgh, 2004. * * All rights reserved. * * * **************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "arraybench.h" void delay(int, double*); int main (int argv, char **argc) { nthreads = 1; #pragma omp parallel { #pragma omp master { #ifdef _OPENMP nthreads = omp_get_num_threads(); #endif } } printf(" Running OpenMP benchmark on %d thread(s)\n", nthreads); delaylength = 500; innerreps = 100; /* GENERATE REFERENCE TIME */ refer(); /* TEST PRIVATE */ testprivnew(); /* TEST FIRSTPRIVATE */ testfirstprivnew(); #ifdef OMPVER2 /* TEST COPYPRIVATE */ testcopyprivnew(); #endif /* TEST THREADPRIVATE - COPYIN */ testthrprivnew(); delaylength = 500; innerreps = 100; } void refer() { int j,k; double start; double meantime, sd; double a[1]; double getclock(void); printf("\n"); printf("--------------------------------------------------------\n"); printf("Computing reference time 1\n"); for (k=0; k<=OUTERREPS; k++){ start = getclock(); for (j=0; j<innerreps; j++){ delay(delaylength, a); } times[k] = (getclock() - start) * 1.0e6 / (double) innerreps; } stats (&meantime, &sd); printf("Reference_time_1 = %10.3f microseconds +/- %10.3f\n", meantime, CONF95*sd); reftime = meantime; refsd = sd; } void testfirstprivnew() { int n,j,k; double start; double meantime, sd; double getclock(void); n=IDA; printf("\n"); printf("--------------------------------------------------------\n"); printf("Computing FIRSTPRIVATE %d time\n", n); for (k=0; k<=OUTERREPS; k++){ start = getclock(); for (j=0; j<innerreps; j++){ #pragma omp parallel 
firstprivate(atest) { delay(delaylength, atest); } } times[k] = (getclock() - start) * 1.0e6 / (double) innerreps; } stats (&meantime, &sd); printf("FIRSTPRIVATE time = %10.3f microseconds +/- %10.3f\n", meantime, CONF95*sd); printf("FIRSTPRIVATE overhead = %10.3f microseconds +/- %10.3f\n", meantime-reftime, CONF95*(sd+refsd)); } void testprivnew() { int n,j,k; double start; double meantime, sd; double getclock(void); n=IDA; printf("\n"); printf("--------------------------------------------------------\n"); printf("Computing PRIVATE %d time\n", n); for (k=0; k<=OUTERREPS; k++){ start = getclock(); for (j=0; j<innerreps; j++){ #pragma omp parallel private(atest) { delay(delaylength, atest); } } times[k] = (getclock() - start) * 1.0e6 / (double) innerreps; } stats (&meantime, &sd); printf("PRIVATE time = %10.3f microseconds +/- %10.3f\n", meantime, CONF95*sd); printf("PRIVATE overhead = %10.3f microseconds +/- %10.3f\n", meantime-reftime, CONF95*(sd+refsd)); } #ifdef OMPVER2 void testcopyprivnew() { int n,j,k; double start; double meantime, sd; double getclock(void); n=IDA; printf("\n"); printf("--------------------------------------------------------\n"); printf("Computing COPYPRIVATE %d time\n", n); for (k=0; k<=OUTERREPS; k++){ start = getclock(); for (j=0; j<innerreps; j++){ #pragma omp single copyprivate(btest) { delay(delaylength, btest); } } times[k] = (getclock() - start) * 1.0e6 / (double) innerreps; } stats (&meantime, &sd); printf("COPYPRIVATE time = %10.3f microseconds +/- %10.3f\n", meantime, CONF95*sd); printf("COPYPRIVATE overhead = %10.3f microseconds +/- %10.3f\n", meantime-reftime, CONF95*(sd+refsd)); } #endif void testthrprivnew() { int n,j,k; double start; double meantime, sd; double getclock(void); n=IDA; printf("\n"); printf("--------------------------------------------------------\n"); printf("Computing COPYIN %d time\n", n); for (k=0; k<=OUTERREPS; k++){ start = getclock(); for (j=0; j<innerreps; j++){ #pragma omp parallel copyin(btest) { 
delay(delaylength, btest); } } times[k] = (getclock() - start) * 1.0e6 / (double) innerreps; } stats (&meantime, &sd); printf("COPYIN time = %10.3f microseconds +/- %10.3f\n", meantime, CONF95*sd); printf("COPYIN overhead = %10.3f microseconds +/- %10.3f\n", meantime-reftime, CONF95*(sd+refsd)); } void stats (double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff; int i, nr; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; for (i=1; i<=OUTERREPS; i++){ mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime +=times[i]; } meantime = totaltime / OUTERREPS; sumsq = 0; for (i=1; i<=OUTERREPS; i++){ sumsq += (times[i]-meantime)* (times[i]-meantime); } sd = sqrt(sumsq/(OUTERREPS-1)); cutoff = 3.0 * sd; nr = 0; for (i=1; i<=OUTERREPS; i++){ if ( fabs(times[i]-meantime) > cutoff ) nr ++; } printf("\n"); printf("Sample_size Average Min Max S.D. Outliers\n"); printf(" %d %10.3f %10.3f %10.3f %10.3f %d\n",OUTERREPS, meantime, mintime, maxtime, sd, nr); printf("\n"); *mtp = meantime; *sdp = sd; }
diagmv_x_csc_u.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * y := beta*y + alpha*x for a CSC matrix with an implicit unit diagonal
 * ("diagmv", unit variant).  The matrix values are never read -- only its
 * column count determines the vector length.  OpenMP version: every y[i]
 * is independent, so the loop parallelizes trivially and results are
 * bitwise identical to the serial kernel.
 */
static alphasparse_status_t diagmv_csc_u_omp(const ALPHA_Number alpha,
                                             const ALPHA_SPMAT_CSC *A,
                                             const ALPHA_Number *x,
                                             const ALPHA_Number beta,
                                             ALPHA_Number *y)
{
    const ALPHA_INT n = A->cols;
    const ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < n; ++i)
    {
        ALPHA_Number tmp;
        alpha_mul(tmp, alpha, x[i]);  /* tmp  = alpha * x[i] */
        alpha_mul(y[i], y[i], beta);  /* y[i] = beta  * y[i] */
        alpha_add(y[i], y[i], tmp);   /* y[i] = y[i]  + tmp  */
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/*
 * Serial fallback kernel.
 * FIX: loop bound and counter were plain 'int' while the rest of the file
 * uses ALPHA_INT; use ALPHA_INT so very large column counts cannot
 * truncate/overflow.
 */
static alphasparse_status_t diagmv_csc_u_serial(const ALPHA_Number alpha,
                                                const ALPHA_SPMAT_CSC *A,
                                                const ALPHA_Number *x,
                                                const ALPHA_Number beta,
                                                ALPHA_Number *y)
{
    const ALPHA_INT m = A->cols;
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_Number tmp;
        alpha_mul(tmp, alpha, x[i]);  /* tmp  = alpha * x[i] */
        alpha_mul(y[i], y[i], beta);  /* y[i] = beta  * y[i] */
        alpha_add(y[i], y[i], tmp);   /* y[i] = y[i]  + tmp  */
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/*
 * Public entry point.
 * FIX: the OpenMP kernel above existed but was dead code -- ONAME always
 * called the serial path.  Dispatch to the parallel kernel when built with
 * OpenMP (outputs are identical: each y[i] is computed independently) and
 * fall back to the serial kernel otherwise.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
#ifdef _OPENMP
    return diagmv_csc_u_omp(alpha, A, x, beta, y);
#else
    return diagmv_csc_u_serial(alpha, A, x, beta, y);
#endif
}
GB_binop__minus_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_int16)
// A*D function (colscale):         GB (_AxD__minus_int16)
// D*A function (rowscale):         GB (_DxB__minus_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_int16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_int16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_int16)
// C=scalar+B                       GB (_bind1st__minus_int16)
// C=scalar+B'                      GB (_bind1st_tran__minus_int16)
// C=A+scalar                       GB (_bind2nd__minus_int16)
// C=A'+scalar                      GB (_bind2nd_tran__minus_int16)

// C type:     int16_t
// A type:     int16_t
// A pattern?  0
// B type:     int16_t
// B pattern?  0

// BinaryOp:   cij = (aij - bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x - aij) ;                       \
}

GrB_Info GB (_bind1st_tran__minus_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its original definition
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij - y) ;                       \
}

GrB_Info GB (_bind2nd_tran__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nn_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ #ifndef FLANN_NNINDEX_H #define FLANN_NNINDEX_H #include <vector> #include "flann/general.h" #include "flann/util/matrix.h" #include "flann/util/params.h" #include "flann/util/result_set.h" #include "flann/util/dynamic_bitset.h" #include "flann/util/saving.h" namespace flann { #define KNN_HEAP_THRESHOLD 250 class IndexBase { public: virtual ~IndexBase() {}; virtual size_t veclen() const = 0; virtual size_t size() const = 0; virtual flann_algorithm_t getType() const = 0; virtual int usedMemory() const = 0; virtual IndexParams getParameters() const = 0; virtual void loadIndex(FILE* stream) = 0; virtual void saveIndex(FILE* stream) = 0; }; /** * Nearest-neighbour index base class */ template <typename Distance> class NNIndex : public IndexBase { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const NNIndex& other) : distance_(other.distance_), last_id_(other.last_id_), size_(other.size_), size_at_build_(other.size_at_build_), veclen_(other.veclen_), index_params_(other.index_params_), removed_(other.removed_), removed_points_(other.removed_points_), removed_count_(other.removed_count_), ids_(other.ids_), points_(other.points_), data_ptr_(NULL) { if (other.data_ptr_) { data_ptr_ = new ElementType[size_*veclen_]; std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } } virtual ~NNIndex() { if (data_ptr_) { delete[] data_ptr_; } } virtual NNIndex* clone() const = 0; /** * Builds the index */ virtual void 
buildIndex() { freeIndex(); cleanRemovedPoints(); // building index buildIndexImpl(); size_at_build_ = size_; } /** * Builds th index using using the specified dataset * @param dataset the dataset to use */ virtual void buildIndex(const Matrix<ElementType>& dataset) { setDataset(dataset); this->buildIndex(); } /** * @brief Incrementally add points to the index. * @param points Matrix with points to be added * @param rebuild_threshold */ virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { throw FLANNException("Functionality not supported by this index"); } /** * Remove point from the index * @param index Index of point to be removed */ virtual void removePoint(size_t id) { if (!removed_) { ids_.resize(size_); for (size_t i=0;i<size_;++i) { ids_[i] = i; } removed_points_.resize(size_); removed_points_.reset(); last_id_ = size_; removed_ = true; } size_t point_index = id_to_index(id); if (point_index!=size_t(-1) && !removed_points_.test(point_index)) { removed_points_.set(point_index); removed_count_++; } } /** * Get point with specific id * @param id * @return */ virtual ElementType* getPoint(size_t id) { size_t index = id_to_index(id); if (index!=size_t(-1)) { return points_[index]; } else { return NULL; } } /** * @return number of features in this index. */ inline size_t size() const { return size_ - removed_count_; } /** * @return The dimensionality of the features in this index. */ inline size_t veclen() const { return veclen_; } /** * Returns the parameters used by the index. 
* * @return The index parameters */ IndexParams getParameters() const { return index_params_; } template<typename Archive> void serialize(Archive& ar) { IndexHeader header; if (Archive::is_saving::value) { header.data_type = flann_datatype_value<ElementType>::value; header.index_type = getType(); header.rows = size_; header.cols = veclen_; } ar & header; // sanity checks if (Archive::is_loading::value) { if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) { throw FLANNException("Invalid index file, wrong signature"); } if (header.data_type != flann_datatype_value<ElementType>::value) { throw FLANNException("Datatype of saved index is different than of the one to be created."); } if (header.index_type != getType()) { throw FLANNException("Saved index type is different then the current index type."); } // TODO: check for distance type } ar & size_; ar & veclen_; ar & size_at_build_; bool save_dataset; if (Archive::is_saving::value) { save_dataset = get_param(index_params_,"save_dataset", false); } ar & save_dataset; if (save_dataset) { if (Archive::is_loading::value) { if (data_ptr_) { delete[] data_ptr_; } data_ptr_ = new ElementType[size_*veclen_]; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } for (size_t i=0;i<size_;++i) { ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType)); } } else { if (points_.size()!=size_) { throw FLANNException("Saved index does not contain the dataset and no dataset was provided."); } } ar & last_id_; ar & ids_; ar & removed_; if (removed_) { ar & removed_points_; } ar & removed_count_; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ virtual int knnSearch(const 
Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += (int)n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += (int)n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = knnSearch(queries, indices_, dists, knn, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = (int)indices_[i][j]; } } delete[] indices_.ptr(); return result; } /** * @brief Perform k-nearest neighbor search * @param[in] 
queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); int count = 0; if (use_heap) { #pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } else { #pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } return count; } /** * * @param queries * @param indices * @param dists * @param knn * @param params * @return */ int knnSearch(const Matrix<ElementType>& queries, 
std::vector< std::vector<int> >& indices, std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { std::vector<std::vector<size_t> > indices_; int result = knnSearch(queries, indices_, dists, knn, params); indices.resize(indices_.size()); for (size_t i=0;i<indices_.size();++i) { indices[i].assign(indices_[i].begin(), indices_[i].end()); } return result; } /** * @brief Perform radius search * @param[in] query The query point * @param[out] indices The indinces of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ int radiusSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; size_t num_neighbors = std::min(indices.cols, dists.cols); int max_neighbors = params.max_neighbors; if (max_neighbors<0) max_neighbors = num_neighbors; else max_neighbors = std::min(max_neighbors,(int)num_neighbors); if (max_neighbors==0) { #pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); count += resultSet.size(); } } } else { // explicitly indicated to use unbounded radius result set // and we know there'll be enough room for resulting indices and dists if (params.max_neighbors<0 && (num_neighbors>=size())) { #pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if 
(n>num_neighbors) n = num_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } else { // number of neighbors limited to max_neighbors #pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if ((int)n>max_neighbors) n = max_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } } return count; } /** * * @param queries * @param indices * @param dists * @param radius * @param params * @return */ int radiusSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params) const { flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols); int result = radiusSearch(queries, indices_, dists, radius, params); for (size_t i=0;i<indices.rows;++i) { for (size_t j=0;j<indices.cols;++j) { indices[i][j] = indices_[i][j]; } } delete[] indices_.ptr(); return result; } /** * @brief Perform radius search * @param[in] query The query point * @param[out] indices The indinces of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ int radiusSearch(const 
Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; // just count neighbors if (params.max_neighbors==0) { #pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); count += resultSet.size(); } } } else { if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); if (params.max_neighbors<0) { // search for all neighbors #pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } } } } else { // number of neighbors limited to max_neighbors #pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors); #pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params); size_t n = resultSet.size(); count += n; if ((int)n>params.max_neighbors) n = params.max_neighbors; indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } } } } } return count; } /** * * @param queries * @param indices * @param dists * @param radius * @param params * @return */ int 
radiusSearch(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices, std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const { std::vector<std::vector<size_t> > indices_; int result = radiusSearch(queries, indices_, dists, radius, params); indices.resize(indices_.size()); for (size_t i=0;i<indices_.size();++i) { indices[i].assign(indices_[i].begin(), indices_[i].end()); } return result; } virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0; protected: virtual void freeIndex() = 0; virtual void buildIndexImpl() = 0; size_t id_to_index(size_t id) { if (ids_.size()==0) { return id; } size_t point_index = size_t(-1); if (ids_[id]==id) { return id; } else { // binary search size_t start = 0; size_t end = ids_.size(); while (start<end) { size_t mid = (start+end)/2; if (ids_[mid]==id) { point_index = mid; break; } else if (ids_[mid]<id) { start = mid + 1; } else { end = mid; } } } return point_index; } void indices_to_ids(const size_t* in, size_t* out, size_t size) const { if (removed_) { for (size_t i=0;i<size;++i) { out[i] = ids_[in[i]]; } } } void setDataset(const Matrix<ElementType>& dataset) { size_ = dataset.rows; veclen_ = dataset.cols; last_id_ = 0; ids_.clear(); removed_points_.clear(); removed_ = false; removed_count_ = 0; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = dataset[i]; } } void extendDataset(const Matrix<ElementType>& new_points) { size_t new_size = size_ + new_points.rows; if (removed_) { removed_points_.resize(new_size); ids_.resize(new_size); } points_.resize(new_size); for (size_t i=size_;i<new_size;++i) { points_[i] = new_points[i-size_]; if (removed_) { ids_[i] = last_id_++; removed_points_.reset(i); } } size_ = new_size; } void cleanRemovedPoints() { if (!removed_) return; size_t last_idx = 0; for (size_t i=0;i<size_;++i) { if (!removed_points_.test(i)) { points_[last_idx] = points_[i]; 
ids_[last_idx] = ids_[i]; removed_points_.reset(last_idx); ++last_idx; } } points_.resize(last_idx); ids_.resize(last_idx); removed_points_.resize(last_idx); size_ = last_idx; removed_count_ = 0; } void swap(NNIndex& other) { std::swap(distance_, other.distance_); std::swap(last_id_, other.last_id_); std::swap(size_, other.size_); std::swap(size_at_build_, other.size_at_build_); std::swap(veclen_, other.veclen_); std::swap(index_params_, other.index_params_); std::swap(removed_, other.removed_); std::swap(removed_points_, other.removed_points_); std::swap(removed_count_, other.removed_count_); std::swap(ids_, other.ids_); std::swap(points_, other.points_); std::swap(data_ptr_, other.data_ptr_); } protected: /** * The distance functor */ Distance distance_; /** * Each index point has an associated ID. IDs are assigned sequentially in * increasing order. This indicates the ID assigned to the last point added to the * index. */ size_t last_id_; /** * Number of points in the index (and database) */ size_t size_; /** * Number of features in the dataset when the index was last built. */ size_t size_at_build_; /** * Size of one point in the index (and database) */ size_t veclen_; /** * Parameters of the index. 
*/ IndexParams index_params_; /** * Flag indicating if at least a point was removed from the index */ bool removed_; /** * Array used to mark points removed from the index */ DynamicBitset removed_points_; /** * Number of points removed from the index */ size_t removed_count_; /** * Array of point IDs, returned by nearest-neighbour operations */ std::vector<size_t> ids_; /** * Point data */ std::vector<ElementType*> points_; /** * Pointer to dataset memory if allocated by this index, otherwise NULL */ ElementType* data_ptr_; }; #define USING_BASECLASS_SYMBOLS \ using NNIndex<Distance>::distance_;\ using NNIndex<Distance>::size_;\ using NNIndex<Distance>::size_at_build_;\ using NNIndex<Distance>::veclen_;\ using NNIndex<Distance>::index_params_;\ using NNIndex<Distance>::removed_points_;\ using NNIndex<Distance>::ids_;\ using NNIndex<Distance>::removed_;\ using NNIndex<Distance>::points_;\ using NNIndex<Distance>::extendDataset;\ using NNIndex<Distance>::setDataset;\ using NNIndex<Distance>::cleanRemovedPoints;\ using NNIndex<Distance>::indices_to_ids; } #endif //FLANN_NNINDEX_H
/* colorspace.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. 
*/ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RGBTransformImage() converts the reference image from sRGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalized the range of the transformed values to % be [0..QuantumRange]. % % The format of the RGBTransformImage method is: % % MagickBooleanType RGBTransformImage(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. % */ static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { double l, m, s; /* Convert XYZ to LMS colorspace. */ assert(L != (double *) NULL); assert(M != (double *) NULL); assert(S != (double *) NULL); l=0.7328f*x+0.4296f*y-0.1624f*z; m=(-0.7036f*x+1.6975f*y+0.0415f*z); s=0.0030f*x+0.0136f*y+0.9834f*z; *L=QuantumRange*l; *M=QuantumRange*m; *S=QuantumRange*s; } static inline void ConvertRGBToXYZ(const Quantum red,const Quantum green, const Quantum blue,double *X,double *Y,double *Z) { double b, g, r; assert(X != (double *) NULL); assert(Y != (double *) NULL); assert(Z != (double *) NULL); r=QuantumScale*red; g=QuantumScale*green; b=QuantumScale*blue; *X=0.41239558896741421610*r+0.35758343076371481710*g+0.18049264738170157350*b; *Y=0.21258623078559555160*r+0.71517030370341084990*g+0.07220049864333622685*b; *Z=0.01929721549174694484*r+0.11918386458084853180*g+0.95049712513157976600*b; } static inline void ConvertXYZToLab(const double X,const double Y,const double Z, double *L,double *a,double *b) { #define D65X (0.950456f) #define D65Y (1.0f) #define D65Z (1.08874f) #define CIEEpsilon 
(216.0f/24389.0f) #define CIEK (24389.0f/27.0f) double x, y, z; assert(L != (double *) NULL); assert(a != (double *) NULL); assert(b != (double *) NULL); if ((X/D65X) > CIEEpsilon) x=pow(X/D65X,1.0/3.0); else x=(CIEK*X/D65X+16.0f)/116.0f; if ((Y/D65Y) > CIEEpsilon) y=pow(Y/D65Y,1.0/3.0); else y=(CIEK*Y/D65Y+16.0f)/116.0f; if ((Z/D65Z) > CIEEpsilon) z=pow(Z/D65Z,1.0/3.0); else z=(CIEK*Z/D65Z+16.0f)/116.0f; *L=((116.0f*y)-16.0f)/100.0f; *a=(500.0f*(x-y))/255.0f+0.5f; *b=(200.0f*(y-z))/255.0f+0.5f; } static inline void ConvertXYZToLuv(const double X,const double Y,const double Z, double *L,double *u,double *v) { double alpha; assert(L != (double *) NULL); assert(u != (double *) NULL); assert(v != (double *) NULL); if ((Y/D65Y) > CIEEpsilon) *L=(double) (116.0f*pow(Y/D65Y,1.0/3.0)-16.0f); else *L=CIEK*(Y/D65Y); alpha=PerceptibleReciprocal(X+15.0f*Y+3.0f*Z); *u=13.0f*(*L)*((4.0f*alpha*X)-(4.0f*D65X/(D65X+15.0f*D65Y+3.0f*D65Z))); *v=13.0f*(*L)*((9.0f*alpha*Y)-(9.0f*D65Y/(D65X+15.0f*D65Y+3.0f*D65Z))); *L/=100.0f; *u=(*u+134.0f)/354.0f; *v=(*v+140.0f)/262.0f; } MagickExport MagickBooleanType RGBTransformImage(Image *image, const ColorspaceType colorspace) { #define RGBTransformImageTag "RGBTransform/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYColorspace: { /* Convert RGB to CMY colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double cyan, magenta, yellow; cyan=DecodePixelGamma((MagickRealType) GetPixelCyan(q)); magenta=DecodePixelGamma((MagickRealType) GetPixelMagenta(q)); yellow=DecodePixelGamma((MagickRealType) GetPixelYellow(q)); SetPixelCyan(q,ClampToQuantum((MagickRealType) (QuantumRange- cyan))); SetPixelMagenta(q,ClampToQuantum((MagickRealType) (QuantumRange- magenta))); SetPixelYellow(q,ClampToQuantum((MagickRealType) (QuantumRange- yellow))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYKColorspace: { MagickPixelPacket zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); pixel.red=DecodePixelGamma((MagickRealType) pixel.red); pixel.green=DecodePixelGamma((MagickRealType) pixel.green); pixel.blue=DecodePixelGamma((MagickRealType) pixel.blue); ConvertRGBToCMYK(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: case Rec601LumaColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, gray, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(q)); gray=0.298839f*red+0.586811f*green+0.114350f*blue; SetPixelGray(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case HCLColorspace: { /* Transform image from sRGB to HCL. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double chroma, hue, luma; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToHCL(red,green,blue,&hue,&chroma,&luma); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*hue)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*chroma)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*luma)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case HSBColorspace: { /* Transform image from sRGB to HSB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double brightness, hue, saturation; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToHSB(red,green,blue,&hue,&saturation,&brightness); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange* hue)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange* saturation)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange* brightness)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case HSLColorspace: { /* Transform image from sRGB to HSL. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double hue, lightness, saturation; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToHSL(red,green,blue,&hue,&saturation,&lightness); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange* hue)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange* saturation)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange* lightness)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case HWBColorspace: { /* Transform image from sRGB to HWB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blackness, hue, whiteness; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToHWB(red,green,blue,&hue,&whiteness,&blackness); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange* hue)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange* whiteness)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange* blackness)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LabColorspace: { /* Transform image from sRGB to Lab. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b, L, X, Y, Z; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,&L,&a,&b); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*L)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*a)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*b)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LCHColorspace: { /* Transform image from sRGB to LCH. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b, C, H, L, X, Y, Z; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,&L,&a,&b); C=sqrt(a*a+b*b); H=atan2(b,a)*180.0/MagickPI; if (H < 0.0) H+=1.0; SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*L)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*C)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*H)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LMSColorspace: { /* Transform image from sRGB to LMS. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double L, M, S, X, Y, Z; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,&L,&M,&S); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*L)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*M)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*S)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0f/1.7f) #define FilmGamma 0.6f #define ReferenceBlack 95.0f #define ReferenceWhite 685.0f const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002f/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002f/ film_gamma))/1024.0f)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); 
blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,logmap[ScaleQuantumToMap(red)]); SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]); SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LuvColorspace: { /* Transform image from sRGB to Luv. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double L, u, v, X, Y, Z; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,&L,&u,&v); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*L)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*u)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*v)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } 
image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case Rec709LumaColorspace: { /* Transform image from sRGB to Rec709Luma. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gray; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); gray=0.212600f*red+0.715200f*green+0.072200f*blue; SetPixelGray(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case RGBColorspace: { /* Transform image from sRGB to linear RGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case XYZColorspace: { /* Transform image from sRGB to XYZ. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) ResetMagickMemory(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0f)/2.0f; primary_info.z=(double) (MaxMap+1.0f)/2.0f; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333f*(float) i); y_map[i].x=(MagickRealType) (0.33334f*(float) i); z_map[i].x=(MagickRealType) (0.33333f*(float) i); x_map[i].y=(MagickRealType) (0.50000f*(float) i); y_map[i].y=(MagickRealType) (0.00000f*(float) i); z_map[i].y=(MagickRealType) (-0.50000f*(float) i); x_map[i].z=(MagickRealType) (-0.25000f*(float) i); y_map[i].z=(MagickRealType) (0.50000f*(float) i); z_map[i].z=(MagickRealType) (-0.25000f*(float) i); } break; } case Rec601YCbCrColorspace: case YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.298839f0*R+0.586811f0*G+0.114350f0*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0f)/2.0f; primary_info.z=(double) (MaxMap+1.0f)/2.0f; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839f*(float) i); y_map[i].x=(MagickRealType) (0.586811f*(float) i); z_map[i].x=(MagickRealType) (0.114350f*(float) i); x_map[i].y=(MagickRealType) (-0.1687367f*(float) i); y_map[i].y=(MagickRealType) (-0.331264f*(float) i); z_map[i].y=(MagickRealType) (0.500000f*(float) i); x_map[i].z=(MagickRealType) (0.500000f*(float) i); y_map[i].z=(MagickRealType) (-0.418688f*(float) i); z_map[i].z=(MagickRealType) (-0.081312f*(float) i); } break; } case Rec709LumaColorspace: { /* Initialize Rec709 luma tables: G = 0.21260*R+0.71520*G+0.07220*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.21260f*(float) i); y_map[i].x=(MagickRealType) (0.71520f*(float) i); z_map[i].x=(MagickRealType) (0.07220f*(float) i); x_map[i].y=(MagickRealType) (0.21260f*(float) i); y_map[i].y=(MagickRealType) (0.71520f*(float) i); z_map[i].y=(MagickRealType) (0.07220f*(float) i); x_map[i].z=(MagickRealType) (0.21260f*(float) i); y_map[i].z=(MagickRealType) (0.71520f*(float) i); z_map[i].z=(MagickRealType) (0.07220f*(float) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212600*R+0.715200*G+0.072200*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0f)/2.0f; primary_info.z=(double) (MaxMap+1.0f)/2.0f; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212600f*(float) i); y_map[i].x=(MagickRealType) (0.715200f*(float) i); z_map[i].x=(MagickRealType) (0.072200f*(float) i); x_map[i].y=(MagickRealType) (-0.114572f*(float) i); y_map[i].y=(MagickRealType) (-0.385428f*(float) i); z_map[i].y=(MagickRealType) (0.500000f*(float) i); x_map[i].z=(MagickRealType) (0.500000f*(float) i); y_map[i].z=(MagickRealType) (-0.454153f*(float) i); z_map[i].z=(MagickRealType) (-0.045847f*(float) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839f*R+0.586811f*G+0.114350f*B C1= -0.298839f*R-0.586811f*G+0.88600*B C2= 0.70100*R-0.586811f*G-0.114350f*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.003962014134275617*i; y_map[i].x=0.007778268551236748*i; z_map[i].x=0.001510600706713781*i; x_map[i].y=(-0.002426619775463276)*i; y_map[i].y=(-0.004763965913702149)*i; z_map[i].y=0.007190585689165425*i; x_map[i].z=0.006927257754597858*i; y_map[i].z=(-0.005800713697502058)*i; z_map[i].z=(-0.0011265440570958)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.2201118963486454*(1.099f*i-0.099f); y_map[i].x=0.4321260306242638*(1.099f*i-0.099f); z_map[i].x=0.08392226148409894*(1.099f*i-0.099f); x_map[i].y=(-0.1348122097479598)*(1.099f*i-0.099f); y_map[i].y=(-0.2646647729834528)*(1.099f*i-0.099f); z_map[i].y=0.3994769827314126*(1.099f*i-0.099f); x_map[i].z=0.3848476530332144*(1.099f*i-0.099f); y_map[i].z=(-0.3222618720834477)*(1.099f*i-0.099f); z_map[i].z=(-0.06258578094976668)*(1.099f*i-0.099f); } break; } case YIQColorspace: { /* 
Initialize YIQ tables: Y = 0.298839f*R+0.586811f*G+0.114350f*B I = 0.595716*R-0.274453*G-0.321263*B Q = 0.211456*R-0.522591*G+0.311135*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0f)/2.0f; primary_info.z=(double) (MaxMap+1.0f)/2.0f; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839f*(float) i); y_map[i].x=(MagickRealType) (0.586811f*(float) i); z_map[i].x=(MagickRealType) (0.114350f*(float) i); x_map[i].y=(MagickRealType) (0.595716f*(float) i); y_map[i].y=(MagickRealType) (-0.274453f*(float) i); z_map[i].y=(MagickRealType) (-0.321263f*(float) i); x_map[i].z=(MagickRealType) (0.211456f*(float) i); y_map[i].z=(MagickRealType) (-0.522591f*(float) i); z_map[i].z=(MagickRealType) (0.311135f*(float) i); } break; } case YPbPrColorspace: { /* Initialize YPbPr tables (ITU-R BT.601): Y = 0.298839f0*R+0.586811f0*G+0.114350f0*B Pb= -0.1687367*R-0.3312640*G+0.5000000*B Pr= 0.5000000*R-0.4186880*G-0.0813120*B Pb and Pr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0f)/2.0f; primary_info.z=(double) (MaxMap+1.0f)/2.0f; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839f*(float) i); y_map[i].x=(MagickRealType) (0.586811f*(float) i); z_map[i].x=(MagickRealType) (0.114350f*(float) i); x_map[i].y=(MagickRealType) (-0.1687367f*(float) i); y_map[i].y=(MagickRealType) (-0.331264f*(float) i); z_map[i].y=(MagickRealType) (0.500000f*(float) i); x_map[i].z=(MagickRealType) (0.500000f*(float) i); y_map[i].z=(MagickRealType) (-0.418688f*(float) i); z_map[i].z=(MagickRealType) (-0.081312f*(float) i); } break; } case YUVColorspace: { /* Initialize YUV tables: Y = 0.298839f*R+0.586811f*G+0.114350f*B U = -0.147130*R-0.288860*G+0.436000*B V = 0.615000*R-0.514990*G-0.100010*B U and V, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. Note that U = 0.493*(B-Y), V = 0.877*(R-Y). */ primary_info.y=(double) (MaxMap+1.0f)/2.0f; primary_info.z=(double) (MaxMap+1.0f)/2.0f; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839f*(float) i); y_map[i].x=(MagickRealType) (0.586811f*(float) i); z_map[i].x=(MagickRealType) (0.114350f*(float) i); x_map[i].y=(MagickRealType) (-0.147130f*(float) i); y_map[i].y=(MagickRealType) (-0.288860f*(float) i); z_map[i].y=(MagickRealType) (0.436000f*(float) i); x_map[i].z=(MagickRealType) (0.615000f*(float) i); y_map[i].z=(MagickRealType) (-0.514990f*(float) i); z_map[i].z=(MagickRealType) (-0.100001f*(float) i); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(float) i); y_map[i].x=(MagickRealType) 0.0f; z_map[i].x=(MagickRealType) 0.0f; x_map[i].y=(MagickRealType) 0.0f; y_map[i].y=(MagickRealType) (1.0*(float) i); z_map[i].y=(MagickRealType) 0.0f; x_map[i].z=(MagickRealType) 0.0f; y_map[i].z=(MagickRealType) 0.0f; z_map[i].z=(MagickRealType) (1.0*(float) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; register size_t blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma( (MagickRealType) GetPixelRed(q)))); green=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma( (MagickRealType) GetPixelGreen(q)))); blue=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma( (MagickRealType) GetPixelBlue(q)))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ (MagickRealType) primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ (MagickRealType) primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ (MagickRealType) primary_info.z; SetPixelRed(q,ScaleMapToQuantum(pixel.red)); SetPixelGreen(q,ScaleMapToQuantum(pixel.green)); SetPixelBlue(q,ScaleMapToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); 
if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RGBTransformImage) #endif proceed=SetImageProgress(image,RGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register size_t blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; red=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma((MagickRealType) image->colormap[i].red))); green=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma((MagickRealType) image->colormap[i].green))); blue=ScaleQuantumToMap(ClampToQuantum(DecodePixelGamma((MagickRealType) image->colormap[i].blue))); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=ScaleMapToQuantum(pixel.red); image->colormap[i].green=ScaleMapToQuantum(pixel.green); image->colormap[i].blue=ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. 
%
%  The format of the SetImageColorspace method is:
%
%      MagickBooleanType SetImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /*
    Tag the image with a new colorspace without converting any pixel data.
    Resets rendering intent, gamma, and chromaticity to defaults, then
    installs the standard sRGB primaries/white point when the target is an
    sRGB-compatible colorspace.
  */
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already tagged with this colorspace: no-op */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000f;
  (void) ResetMagickMemory(&image->chromaticity,0,sizeof(image->chromaticity));
  if (IssRGBColorspace(colorspace) != MagickFalse)
    {
      /*
        sRGB defaults: perceptual intent, ~2.2 display gamma, Rec.709
        primaries and the D65 white point (x=0.3127, y=0.3290).
      */
      image->rendering_intent=PerceptualIntent;
      image->gamma=1.000f/2.200f;
      image->chromaticity.red_primary.x=0.6400f;
      image->chromaticity.red_primary.y=0.3300f;
      image->chromaticity.red_primary.z=0.0300f;
      image->chromaticity.green_primary.x=0.3000f;
      image->chromaticity.green_primary.y=0.6000f;
      image->chromaticity.green_primary.z=0.1000f;
      image->chromaticity.blue_primary.x=0.1500f;
      image->chromaticity.blue_primary.y=0.0600f;
      image->chromaticity.blue_primary.z=0.7900f;
      image->chromaticity.white_point.x=0.3127f;
      image->chromaticity.white_point.y=0.3290f;
      image->chromaticity.white_point.z=0.3583f;
    }
  if (IsGrayColorspace(colorspace) != MagickFalse)
    image->type=GrayscaleType;
  /* Propagate the colorspace change to the pixel cache metadata. */
  return(SyncImagePixelCache(image,&image->exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    transform_status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Degenerate requests: an undefined target simply re-tags the image; an
    identical target is a no-op.
  */
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    The pixel data is about to be converted, so any embedded color profiles
    no longer describe it; discard them.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  /*
    Route through sRGB: first bring the image back to sRGB (if it is not
    already), then convert from sRGB to the requested colorspace.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,colorspace));
  transform_status=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    {
      transform_status=TransformRGBImage(image,image->colorspace);
      if (transform_status == MagickFalse)
        return(MagickFalse);
    }
  if (RGBTransformImage(image,colorspace) == MagickFalse)
    transform_status=MagickFalse;
  return(transform_status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m R G B I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/*
  Inverse of the XYZ->LMS adaptation: scales the quantum-range L/M/S cone
  responses to [0,1], then applies the fixed 3x3 inverse matrix.
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  double
    l,
    m,
    s;

  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  l=QuantumScale*L;
  m=QuantumScale*M;
  s=QuantumScale*S;
  *X=1.096123820835514*l-0.278869000218287*m+0.182745179382773*s;
  *Y=0.454369041975359*l+0.473533154307412*m+0.072097803717229*s;
  *Z=(-0.009627608738429)*l-0.005698031216113*m+1.015325639954543*s;
}

/*
  CIE L*a*b* to XYZ (D65 reference white).  Inputs are normalized: L in
  [0,1] stands for L* in [0,100]; a and b are offset so 0.5 is the neutral
  axis (a*,b* in roughly [-128,127]).  The cube/linear split follows the
  standard CIE inverse companding with CIEEpsilon/CIEK.
*/
static inline void ConvertLabToXYZ(const double L,const double a,const double b,
  double *X,double *Y,double *Z)
{
  double
    x,
    y,
    z;

  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  y=(100.0f*L+16.0f)/116.0f;
  x=y+255.0f*(a-0.5f)/500.0f;
  z=y-255.0f*(b-0.5f)/200.0f;
  if ((x*x*x) > CIEEpsilon)
    x=(x*x*x);
  else
    x=(116.0f*x-16.0f)/CIEK;
  if ((y*y*y) > CIEEpsilon)
    y=(y*y*y);
  else
    y=(100.0f*L)/CIEK;  /* linear branch: Y = L*/kappa per the CIE formula */
  if ((z*z*z) > CIEEpsilon)
    z=(z*z*z);
  else
    z=(116.0f*z-16.0f)/CIEK;
  *X=D65X*x;
  *Y=D65Y*y;
  *Z=D65Z*z;
}

/*
  CIE L*u*v* to XYZ (D65 reference white).  L is normalized to [0,1] for
  L* in [0,100]; u and v are offset/scaled chromaticities.  Y comes from
  the standard cube/linear split; X and Z are recovered from the u',v'
  chromaticity relations expanded inline.
*/
static inline void ConvertLuvToXYZ(const double L,const double u,const double v,
  double *X,double *Y,double *Z)
{
  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  if ((100.0f*L) > (CIEK*CIEEpsilon))
    *Y=(double) pow(((100.0*L)+16.0)/116.0,3.0);
  else
    *Y=(100.0f*L)/CIEK;
  *X=((*Y*((39.0f*(100.0f*L)/((262.0f*v-140.0f)+13.0f*(100.0f*L)*(9.0f*D65Y/
    (D65X+15.0f*D65Y+3.0f*D65Z))))-5.0f))+5.0f*(*Y))/((((52.0f*(100.0f*L)/
    ((354.0f*u-134.0f)+13.0f*(100.0f*L)*(4.0f*D65X/(D65X+15.0f*D65Y+3.0f*
    D65Z))))-1.0f)/3.0f)-(-1.0f/3.0f));
  *Z=(*X*(((52.0f*(100.0f*L)/((354.0f*u-134.0f)+13.0f*(100.0f*L)*(4.0f*D65X/
    (D65X+15.0f*D65Y+3.0f*D65Z))))-1.0f)/3.0f))-5.0f*(*Y);
}

/*
  Round a scaled PhotoCD luma/chroma value to the nearest YCCMap table
  index, clamped to the table bounds [0,1388].
*/
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  if (value <= 0.0f)
    return(0);
  if (value >= 1388.0f)
    return(1388);
  return((ssize_t) (value+0.5f));
}

static inline void ConvertXYZToRGB(const double x,const double y,const double z,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
b, g, r; /* Convert XYZ to sRGB colorspace. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); r=3.2406f*x-1.5372f*y-0.4986f*z; g=(-0.9689f*x+1.8758f*y+0.0415f*z); b=0.0557f*x-0.2040f*y+1.0570f*z; *red=ClampToQuantum((MagickRealType) QuantumRange*r); *green=ClampToQuantum((MagickRealType) QuantumRange*g); *blue=ClampToQuantum((MagickRealType) QuantumRange*b); } static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel) { pixel->red=((QuantumRange-(QuantumScale*pixel->red* (QuantumRange-pixel->index)+pixel->index))); pixel->green=((QuantumRange-(QuantumScale*pixel->green* (QuantumRange-pixel->index)+pixel->index))); pixel->blue=((QuantumRange-(QuantumScale*pixel->blue* (QuantumRange-pixel->index)+pixel->index))); } MagickExport MagickBooleanType TransformRGBImage(Image *image, const ColorspaceType colorspace) { #define TransformRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 
0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 
0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 
0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 
0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 
0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 
0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 
0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; exception=(&image->exception); switch (image->colorspace) { case CMYColorspace: { /* Transform image from CMY to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum cyan, magenta, yellow; cyan=ClampToQuantum(EncodePixelGamma((MagickRealType) (QuantumRange-GetPixelCyan(q)))); magenta=ClampToQuantum(EncodePixelGamma((MagickRealType) (QuantumRange-GetPixelMagenta(q)))); yellow=ClampToQuantum(EncodePixelGamma((MagickRealType) (QuantumRange-GetPixelYellow(q)))); SetPixelCyan(q,cyan); SetPixelMagenta(q,magenta); SetPixelYellow(q,yellow); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); 
if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); pixel.red=EncodePixelGamma(pixel.red); pixel.green=EncodePixelGamma(pixel.green); pixel.blue=EncodePixelGamma(pixel.blue); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double gray; gray=EncodePixelGamma((MagickRealType) GetPixelGray(q)); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case HCLColorspace: { /* Transform image from HCL to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double chroma, hue, luma; Quantum blue, green, red; hue=(double) (QuantumScale*GetPixelRed(q)); chroma=(double) (QuantumScale*GetPixelGreen(q)); luma=(double) (QuantumScale*GetPixelBlue(q)); ConvertHCLToRGB(hue,chroma,luma,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case HSBColorspace: { /* Transform image from HSB to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double brightness, hue, saturation; Quantum blue, green, red; hue=(double) (QuantumScale*GetPixelRed(q)); saturation=(double) (QuantumScale*GetPixelGreen(q)); brightness=(double) (QuantumScale*GetPixelBlue(q)); ConvertHSBToRGB(hue,saturation,brightness,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case HSLColorspace: { /* Transform image from HSL to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double hue, lightness, saturation; Quantum blue, green, red; hue=(double) (QuantumScale*GetPixelRed(q)); saturation=(double) (QuantumScale*GetPixelGreen(q)); lightness=(double) (QuantumScale*GetPixelBlue(q)); ConvertHSLToRGB(hue,saturation,lightness,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case HWBColorspace: { /* Transform image from HWB to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blackness, hue, whiteness; Quantum blue, green, red; hue=(double) (QuantumScale*GetPixelRed(q)); whiteness=(double) (QuantumScale*GetPixelGreen(q)); blackness=(double) (QuantumScale*GetPixelBlue(q)); ConvertHWBToRGB(hue,whiteness,blackness,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LabColorspace: { /* Transform image from Lab to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b, L, X, Y, Z; Quantum blue, green, red; L=QuantumScale*GetPixelRed(q); a=QuantumScale*GetPixelGreen(q); b=QuantumScale*GetPixelBlue(q); ConvertLabToXYZ(L,a,b,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LCHColorspace: { /* Transform image from LCH to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b, C, H, L, X, Y, Z; Quantum blue, green, red; L=QuantumScale*GetPixelRed(q); C=QuantumScale*GetPixelGreen(q); H=QuantumScale*GetPixelBlue(q); a=C*cos(H*(MagickPI/180.0f)); b=C*sin(H*(MagickPI/180.0f)); ConvertLabToXYZ(L,a,b,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LMSColorspace: { /* Transform image from LMS to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double L, M, S, X, Y, Z; Quantum blue, green, red; L=QuantumScale*GetPixelRed(q); M=QuantumScale*GetPixelGreen(q); S=QuantumScale*GetPixelBlue(q); ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002f/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0f); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0f); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0f-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002f/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; 
continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))])); green=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))])); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))])); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LuvColorspace: { /* Transform image from Luv to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double L, u, v, X, Y, Z; Quantum blue, green, red; L=QuantumScale*GetPixelRed(q); u=QuantumScale*GetPixelGreen(q); v=QuantumScale*GetPixelBlue(q); ConvertLuvToXYZ(L,u,v,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); 
SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case XYZColorspace: { /* Transform image from XYZ to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); red=ClampToQuantum(EncodePixelGamma((MagickRealType) red)); green=ClampToQuantum(EncodePixelGamma((MagickRealType) green)); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) blue)); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0f*(float) i); y_map[i].x=(MagickRealType) (0.500000f*(2.0f*(float) i-MaxMap)); z_map[i].x=(MagickRealType) ((-0.333340f)*(2.0f*(float) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0f*(float) i); y_map[i].y=(MagickRealType) (0.000000f); z_map[i].y=(MagickRealType) (0.666665f*(2.0f*(float) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0f*(float) i); y_map[i].z=(MagickRealType) ((-0.500000f)*(2.0f*(float) i-MaxMap)); z_map[i].z=(MagickRealType) ((-0.333340f)*(2.0f*(float) i-MaxMap)); } break; } case Rec601YCbCrColorspace: case YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*i; y_map[i].x=(1.2188941887145875e-06)*i; z_map[i].x=0.5f*1.4019995886561440468*(2.00f*i-MaxMap); x_map[i].y=0.99999975910502514331*i; y_map[i].y=0.5f*(-0.34413567816504303521)*(2.00f*i-MaxMap); z_map[i].y=0.5f*(-0.71413649331646789076)*(2.00f*i-MaxMap); x_map[i].z=1.00000124040004623180*i; y_map[i].z=0.5f*1.77200006607230409200*(2.00f*i-MaxMap); z_map[i].z=2.1453384174593273e-06*i; } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0f*(float) i); y_map[i].x=(MagickRealType) (0.000000f*(2.0f*(float) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5f*1.574800f*(2.0f*(float) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0f*(float) i); y_map[i].y=(MagickRealType) (0.5f*(-0.187324f)*(2.0f*(float) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5f*(-0.468124f)*(2.0f*(float) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0f*(float) i); y_map[i].z=(MagickRealType) (0.5f*1.855600f*(2.0f*(float) i-MaxMap)); z_map[i].z=(MagickRealType) (0.000000f*(2.0f*(float) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000f*(float) i); y_map[i].x=(MagickRealType) (0.0000000f); z_map[i].x=(MagickRealType) (1.8215000f*((float) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000f*(float) i); y_map[i].y=(MagickRealType) ((-0.4302726f)*((float) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435f)*((float) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000f*(float) i); y_map[i].z=(MagickRealType) (2.2179000f*((float) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000f); } break; } case YIQColorspace: { /* Initialize YIQ tables: R = Y+0.95620*I+0.62140*Q G = Y-0.27270*I-0.64680*Q B = Y-1.10370*I+1.70060*Q I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=1.0f*i; y_map[i].x=0.5f*0.9562957197589482261*(2.00000f*i-MaxMap); z_map[i].x=0.5f*0.6210244164652610754*(2.00000f*i-MaxMap); x_map[i].y=1.0f*i; y_map[i].y=0.5f*(-0.2721220993185104464)*(2.00000f*i-MaxMap); z_map[i].y=0.5f*(-0.6473805968256950427)*(2.00000f*i-MaxMap); x_map[i].z=1.0f*i; y_map[i].z=0.5f*(-1.1069890167364901945)*(2.00000f*i-MaxMap); z_map[i].z=0.5f*1.7046149983646481374*(2.00000f*i-MaxMap); } break; } case YPbPrColorspace: { /* Initialize YPbPr tables: R = Y +1.402000*C2 G = Y-0.344136*C1+0.714136*C2 B = Y+1.772000*C1 Pb and Pr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*i; y_map[i].x=(-1.2188941887145875e-06)*(2.0f*i-MaxMap); z_map[i].x=0.5f*1.4019995886561440468*(2.0f*i-MaxMap); x_map[i].y=0.99999975910502514331*i; y_map[i].y=0.5f*(-0.34413567816504303521)*(2.0f*i-MaxMap); z_map[i].y=0.5f*(-0.71413649331646789076)*(2.0f*i-MaxMap); x_map[i].z=1.00000124040004623180*i; y_map[i].z=0.5f*1.77200006607230409200*(2.0f*i-MaxMap); z_map[i].z=2.1453384174593273e-06*(2.0f*i-MaxMap); } break; } case YUVColorspace: { /* Initialize YUV tables: R = Y +1.13983*V G = Y-0.39464*U-0.58060*V B = Y+2.03211*U U and V, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=1.0f*i; y_map[i].x=(-3.945707070708279e-05)*(2.0f*i-MaxMap); z_map[i].x=0.5f*1.1398279671717170825*(2.0f*i-MaxMap); x_map[i].y=1.0f*i; y_map[i].y=0.5f*(-0.3946101641414141437)*(2.0f*i-MaxMap); z_map[i].y=0.5f*(-0.5805003156565656797)*(2.0f*i-MaxMap); x_map[i].z=1.0f*i; y_map[i].z=0.5f*2.0319996843434342537*(2.0f*i-MaxMap); z_map[i].z=(-4.813762626262513e-04)*(2.0f*i-MaxMap); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(float) i); y_map[i].x=(MagickRealType) 0.0f; z_map[i].x=(MagickRealType) 0.0f; x_map[i].y=(MagickRealType) 0.0f; y_map[i].y=(MagickRealType) (1.0*(float) i); z_map[i].y=(MagickRealType) 0.0f; x_map[i].z=(MagickRealType) 0.0f; y_map[i].z=(MagickRealType) 0.0f; z_map[i].z=(MagickRealType) (1.0*(float) i); } break; } } /* Convert to sRGB. 
*/ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.blue/ (double) MaxMap)]; } else { pixel.red=EncodePixelGamma((MagickRealType) ScaleMapToQuantum(pixel.red)); pixel.green=EncodePixelGamma((MagickRealType) ScaleMapToQuantum(pixel.green)); pixel.blue=EncodePixelGamma((MagickRealType) ScaleMapToQuantum(pixel.blue)); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformRGBImage) #endif 
proceed=SetImageProgress(image,TransformRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0f*pixel.blue/ (double) MaxMap)]; } else { pixel.red=EncodePixelGamma((MagickRealType) ScaleMapToQuantum( pixel.red)); pixel.green=EncodePixelGamma((MagickRealType) ScaleMapToQuantum( pixel.green)); pixel.blue=EncodePixelGamma((MagickRealType) ScaleMapToQuantum( pixel.blue)); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
interppotential_calc_potential.c
/* C code for calculating a potential and its forces on a grid */ #ifdef _WIN32 #include <Python.h> #endif #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #define CHUNKSIZE 1 //Potentials #include <galpy_potentials.h> #include <actionAngle.h> #include <integrateFullOrbit.h> #include <interp_2d.h> #include <cubic_bspline_2d_coeffs.h> //Macros to export functions in DLL on different OS #if defined(_WIN32) #define EXPORT __declspec(dllexport) #elif defined(__GNUC__) #define EXPORT __attribute__((visibility("default"))) #else // Just do nothing? #define EXPORT #endif /* MAIN FUNCTIONS */ EXPORT void calc_potential(int nR, double *R, int nz, double *z, int npot, int * pot_type, double * pot_args, double *out, int * err){ int ii, jj, tid, nthreads; #ifdef _OPENMP nthreads = omp_get_max_threads(); #else nthreads = 1; #endif double * row= (double *) malloc ( nthreads * nz * ( sizeof ( double ) ) ); //Set up the potentials struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(npot,potentialArgs,&pot_type,&pot_args); //Run through the grid and calculate UNUSED int chunk= CHUNKSIZE; #pragma omp parallel for schedule(static,chunk) private(ii,tid,jj) \ shared(row,npot,potentialArgs,R,z,nR,nz) for (ii=0; ii < nR; ii++){ #ifdef _OPENMP tid= omp_get_thread_num(); #else tid = 0; #endif for (jj=0; jj < nz; jj++){ *(row+jj+tid*nz)= evaluatePotentials(*(R+ii),*(z+jj),npot,potentialArgs); } put_row(out,ii,row+tid*nz,nz); } free_potentialArgs(npot,potentialArgs); free(potentialArgs); free(row); } EXPORT void calc_rforce(int nR, double *R, int nz, double *z, int npot, int * pot_type, double * pot_args, double *out, int * err){ int ii, jj, tid, nthreads; #ifdef _OPENMP nthreads = omp_get_max_threads(); #else nthreads = 1; #endif double * row= (double *) malloc ( nthreads * nz * ( sizeof ( double ) ) ); //Set up the potentials struct 
potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(npot,potentialArgs,&pot_type,&pot_args); //Run through the grid and calculate UNUSED int chunk= CHUNKSIZE; #pragma omp parallel for schedule(static,chunk) private(ii,tid,jj) \ shared(row,npot,potentialArgs,R,z,nR,nz) for (ii=0; ii < nR; ii++){ #ifdef _OPENMP tid= omp_get_thread_num(); #else tid = 0; #endif for (jj=0; jj < nz; jj++){ *(row+jj+tid*nz)= calcRforce(*(R+ii),*(z+jj),0.,0.,npot,potentialArgs); } put_row(out,ii,row+tid*nz,nz); } free_potentialArgs(npot,potentialArgs); free(potentialArgs); free(row); } EXPORT void calc_zforce(int nR, double *R, int nz, double *z, int npot, int * pot_type, double * pot_args, double *out, int * err){ int ii, jj, tid, nthreads; #ifdef _OPENMP nthreads = omp_get_max_threads(); #else nthreads = 1; #endif double * row= (double *) malloc ( nthreads * nz * ( sizeof ( double ) ) ); //Set up the potentials struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(npot,potentialArgs,&pot_type,&pot_args); //Run through the grid and calculate UNUSED int chunk= CHUNKSIZE; #pragma omp parallel for schedule(static,chunk) private(ii,tid,jj) \ shared(row,npot,potentialArgs,R,z,nR,nz) for (ii=0; ii < nR; ii++){ #ifdef _OPENMP tid= omp_get_thread_num(); #else tid = 0; #endif for (jj=0; jj < nz; jj++){ *(row+jj+tid*nz)= calczforce(*(R+ii),*(z+jj),0.,0.,npot,potentialArgs); } put_row(out,ii,row+tid*nz,nz); } free_potentialArgs(npot,potentialArgs); free(potentialArgs); free(row); } EXPORT void eval_potential(int nR, double *R, double *z, int npot, int * pot_type, double * pot_args, double *out, int * err){ int ii; //Set up the potentials struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(npot,potentialArgs,&pot_type,&pot_args); //Run through and evaluate for (ii=0; ii 
< nR; ii++){ *(out+ii)= evaluatePotentials(*(R+ii),*(z+ii),npot,potentialArgs); } free_potentialArgs(npot,potentialArgs); free(potentialArgs); } EXPORT void eval_rforce(int nR, double *R, double *z, int npot, int * pot_type, double * pot_args, double *out, int * err){ int ii; //Set up the potentials struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(npot,potentialArgs,&pot_type,&pot_args); //Run through and evaluate for (ii=0; ii < nR; ii++){ *(out+ii)= calcRforce(*(R+ii),*(z+ii),0.,0.,npot,potentialArgs); } free_potentialArgs(npot,potentialArgs); free(potentialArgs); } EXPORT void eval_zforce(int nR, double *R, double *z, int npot, int * pot_type, double * pot_args, double *out, int * err){ int ii; //Set up the potentials struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(npot,potentialArgs,&pot_type,&pot_args); //Run through and evaluate for (ii=0; ii < nR; ii++){ *(out+ii)= calczforce(*(R+ii),*(z+ii),0.,0.,npot,potentialArgs); } free_potentialArgs(npot,potentialArgs); free(potentialArgs); }
GB_unop__ainv_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_fp64_fp64
// op(A') function: GB_unop_tran__ainv_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies cij = -aij element-wise over anz entries, using nthreads.
// Two layouts are handled: dense (Ab == NULL) and bitmap (Ab marks which
// entries of Ax are present).
GrB_Info GB_unop_apply__ainv_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a straight memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = -z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries absent from the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose kernel body lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above.
GrB_Info GB_unop_tran__ainv_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
interp_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haitao@openailab.com
 */

#include "interp_kernel_arm.h"
#include "utility/sys_port.h"
#include <math.h>
#include <arm_neon.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Compute per-output-column source offsets and bilinear weights.
 * xofs[dx] is the left source column used for output column dx, and
 * alpha[dx*2 + 0..1] are the weights of columns xofs[dx] and xofs[dx]+1.
 * Edges are clamped so that xofs[dx]+1 always stays inside the source row. */
static void linear_coeffs(int w, int outw, int* xofs, float* alpha)
{
    double scale = ( double )w / outw;

    for (int dx = 0; dx < outw; dx++)
    {
        /* half-pixel-centered mapping from output to input coordinates */
        float fx = ( float )((dx + 0.5) * scale - 0.5);
        int sx = floor(fx);
        fx -= sx;

        if (sx < 0)
        {
            sx = 0;
            fx = 0.f;
        }
        if (sx >= w - 1)
        {
            sx = w - 2;
            fx = 1.f;
        }

        xofs[dx] = sx;
        alpha[dx * 2] = 1.f - fx;
        alpha[dx * 2 + 1] = fx;
    }
}

/* Bilinear resize of one (in_h x in_w) float plane into (out_h x out_w).
 * The horizontal interpolation of the two source rows needed for an output
 * row is cached in rowsbuf0/rowsbuf1 and reused while consecutive output
 * rows map to the same source row pair (yofs[dy]); the vertical blend with
 * beta[] then runs over the cached rows.
 *
 * Fix vs. previous revision: the NEON horizontal loops process two output
 * columns per iteration and previously skipped the last column when out_w
 * is odd, leaving uninitialized data in the row cache that the vertical
 * blend then read. Scalar tail loops now cover the odd column. */
static void resize_bilinear_image(float* src, float* dst, float* alpha, int* xofs, float* beta, int* yofs, int out_h,
                                  int out_w, int in_h, int in_w)
{
    int w = out_w;    // dst.w;
    int h = out_h;    // dst.h;

    // loop body
    float* rowsbuf0 = ( float* )sys_malloc(w * sizeof(float));
    float* rowsbuf1 = ( float* )sys_malloc(w * sizeof(float));
    float* rows0 = rowsbuf0;
    float* rows1 = rowsbuf1;

    int prev_sy1 = -2;

    for (int dy = 0; dy < h; dy++)
    {
        int sy = yofs[dy];

        if (sy == prev_sy1)
        {
            // reuse all cached rows
        }
        else if (sy == prev_sy1 + 1)
        {
            // shifted down one source row: old rows1 becomes rows0,
            // only the new rows1 must be recomputed
            float* rows0_old = rows0;
            rows0 = rows1;
            rows1 = rows0_old;
            const float* S1 = src + (sy + 1) * in_w;    // src.row(sy+1);

            const float* alphap = alpha;
            float* rows1p = rows1;
            int dx = 0;
            // neon: two output columns per iteration
            for (; dx + 1 < w; dx += 2)
            {
                int sx = xofs[dx];
                int sxn = xofs[dx + 1];
                const float* S1p = S1 + sx;
                const float* S1np = S1 + sxn;

                float32x4_t _a = vld1q_f32(alphap);
                float32x2_t _S1 = vld1_f32(S1p);
                float32x2_t _S1n = vld1_f32(S1np);

                float32x4_t _S1S1n = vcombine_f32(_S1, _S1n);
                float32x4_t _ms1 = vmulq_f32(_S1S1n, _a);
                float32x2_t _rows1 = vpadd_f32(vget_low_f32(_ms1), vget_high_f32(_ms1));

                vst1_f32(rows1p + dx, _rows1);

                alphap += 4;
            }
            // scalar tail for odd out_w (was missing before)
            for (; dx < w; dx++)
            {
                const float* S1p = S1 + xofs[dx];
                rows1p[dx] = S1p[0] * alphap[0] + S1p[1] * alphap[1];
                alphap += 2;
            }
        }
        else
        {
            // hresize two rows
            const float* S0 = src + sy * in_w;          // src.row(sy);
            const float* S1 = src + (sy + 1) * in_w;    // src.row(sy+1);

            const float* alphap = alpha;
            float* rows0p = rows0;
            float* rows1p = rows1;
            int dx = 0;
            for (; dx + 1 < w; dx += 2)
            {
                int sx = xofs[dx];
                int sxn = xofs[dx + 1];
                const float* S0p = S0 + sx;
                const float* S1p = S1 + sx;
                const float* S0np = S0 + sxn;
                const float* S1np = S1 + sxn;

                float32x4_t _a = vld1q_f32(alphap);
                float32x2_t _S0 = vld1_f32(S0p);
                float32x2_t _S1 = vld1_f32(S1p);
                float32x2_t _S0n = vld1_f32(S0np);
                float32x2_t _S1n = vld1_f32(S1np);

                float32x4_t _S0S0n = vcombine_f32(_S0, _S0n);
                float32x4_t _S1S1n = vcombine_f32(_S1, _S1n);
                float32x4_t _ms0 = vmulq_f32(_S0S0n, _a);
                float32x4_t _ms1 = vmulq_f32(_S1S1n, _a);
                float32x2_t _rows0 = vpadd_f32(vget_low_f32(_ms0), vget_high_f32(_ms0));
                float32x2_t _rows1 = vpadd_f32(vget_low_f32(_ms1), vget_high_f32(_ms1));

                vst1_f32(rows0p + dx, _rows0);
                vst1_f32(rows1p + dx, _rows1);

                alphap += 4;
            }
            // scalar tail for odd out_w (was missing before)
            for (; dx < w; dx++)
            {
                const float* S0p = S0 + xofs[dx];
                const float* S1p = S1 + xofs[dx];
                rows0p[dx] = S0p[0] * alphap[0] + S0p[1] * alphap[1];
                rows1p[dx] = S1p[0] * alphap[0] + S1p[1] * alphap[1];
                alphap += 2;
            }
        }

        prev_sy1 = sy;

        // vresize: blend the two cached rows with the vertical weights
        float b0 = beta[0];
        float b1 = beta[1];

        float* rows0p = rows0;
        float* rows1p = rows1;
        float* Dp = dst + dy * out_w;    // dst.row(dy);

        int nn = w >> 3;
        int remain = w - (nn << 3);

        float32x4_t _b0 = vdupq_n_f32(b0);
        float32x4_t _b1 = vdupq_n_f32(b1);
        for (; nn > 0; nn--)
        {
            float32x4_t _rows0 = vld1q_f32(rows0p);
            float32x4_t _rows1 = vld1q_f32(rows1p);

            float32x4_t _D = vmulq_f32(_rows0, _b0);
            _D = vmlaq_f32(_D, _rows1, _b1);

            vst1q_f32(Dp, _D);

            float32x4_t _rows0n = vld1q_f32(rows0p + 4);
            float32x4_t _rows1n = vld1q_f32(rows1p + 4);

            float32x4_t _Dn = vmulq_f32(_rows0n, _b0);
            _Dn = vmlaq_f32(_Dn, _rows1n, _b1);

            vst1q_f32(Dp + 4, _Dn);

            Dp += 8;
            rows0p += 8;
            rows1p += 8;
        }
        for (; remain; --remain)
        {
            *Dp++ = *rows0p++ * b0 + *rows1p++ * b1;
        }

        beta += 2;
    }

    sys_free(rowsbuf0);
    sys_free(rowsbuf1);
}

/* Catmull-Rom style cubic kernel (A = -0.75, as used by OpenCV):
 * given the fractional offset fx in [0,1), emit the 4 tap weights for
 * source columns sx-1 .. sx+2. */
static inline void interpolate_cubic(float fx, float* coeffs)
{
    const float A = -0.75f;

    float fx0 = fx + 1;
    float fx1 = fx;
    float fx2 = 1 - fx;

    coeffs[0] = A * fx0 * fx0 * fx0 - 5 * A * fx0 * fx0 + 8 * A * fx0 - 4 * A;
    coeffs[1] = (A + 2) * fx1 * fx1 * fx1 - (A + 3) * fx1 * fx1 + 1;
    coeffs[2] = (A + 2) * fx2 * fx2 * fx2 - (A + 3) * fx2 * fx2 + 1;
    coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];
}

/* Compute per-output-column source offsets and 4-tap bicubic weights.
 * alpha[dx*4 + 0..3] weight source columns xofs[dx]-1 .. xofs[dx]+2.
 * Near the borders the taps that would fall outside the row are folded
 * into the in-range taps and sx is clamped so every access stays valid. */
static void cubic_coeffs(int w, int outw, int* xofs, float* alpha)
{
    double scale = ( double )w / outw;

    for (int dx = 0; dx < outw; dx++)
    {
        float fx = ( float )((dx + 0.5) * scale - 0.5);
        int sx = floor(fx);
        fx -= sx;

        interpolate_cubic(fx, alpha + dx * 4);

        if (sx <= -1)
        {
            sx = 1;
            alpha[dx * 4 + 0] = 1.f - alpha[dx * 4 + 3];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 3];
            alpha[dx * 4 + 2] = 0.f;
            alpha[dx * 4 + 3] = 0.f;
        }
        if (sx == 0)
        {
            sx = 1;
            alpha[dx * 4 + 0] = alpha[dx * 4 + 0] + alpha[dx * 4 + 1];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 2];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 3];
            alpha[dx * 4 + 3] = 0.f;
        }
        if (sx == w - 2)
        {
            sx = w - 3;
            alpha[dx * 4 + 3] = alpha[dx * 4 + 2] + alpha[dx * 4 + 3];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 1];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 0];
            alpha[dx * 4 + 0] = 0.f;
        }
        if (sx >= w - 1)
        {
            sx = w - 3;
            alpha[dx * 4 + 3] = 1.f - alpha[dx * 4 + 0];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 0];
            alpha[dx * 4 + 1] = 0.f;
            alpha[dx * 4 + 0] = 0.f;
        }

        xofs[dx] = sx;
    }
}

/* Bicubic resize of one (in_h x in_w) float plane into (out_h x out_w).
 * Four horizontally-interpolated source rows are cached in rowsbuf0..3 and
 * rotated/reused when the source row window (yofs[dy]) advances by 1, 2 or
 * 3 rows; otherwise all four are recomputed. The vertical 4-tap blend with
 * beta[] then produces each output row.
 * Note: S?p[-1]..S?p[2] accesses are safe because cubic_coeffs clamps
 * xofs[] to [1, w-3]. */
static void resize_bicubic_image(float* src, float* dst, float* alpha, int* xofs, float* beta, int* yofs, int out_h,
                                 int out_w, int in_h, int in_w)
{
    int w = out_w;    // dst.w;
    int h = out_h;    // dst.h;

    // loop body
    float* rowsbuf0 = ( float* )sys_malloc(w * sizeof(float));
    float* rowsbuf1 = ( float* )sys_malloc(w * sizeof(float));
    float* rowsbuf2 = ( float* )sys_malloc(w * sizeof(float));
    float* rowsbuf3 = ( float* )sys_malloc(w * sizeof(float));
    float* rows0 = rowsbuf0;
    float* rows1 = rowsbuf1;
    float* rows2 = rowsbuf2;
    float* rows3 = rowsbuf3;

    int prev_sy1 = -3;

    for (int dy = 0; dy < h; dy++)
    {
        int sy = yofs[dy];

        if (sy == prev_sy1)
        {
            // reuse all cached rows
        }
        else if (sy == prev_sy1 + 1)
        {
            // hresize one row
            float* rows0_old = rows0;
            rows0 = rows1;
            rows1 = rows2;
            rows2 = rows3;
            rows3 = rows0_old;
            const float* S3 = src + (sy + 2) * in_w;    // src.row(sy+2);

            const float* alphap = alpha;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else if (sy == prev_sy1 + 2)
        {
            // hresize two rows
            float* rows0_old = rows0;
            float* rows1_old = rows1;
            rows0 = rows2;
            rows1 = rows3;
            rows2 = rows0_old;
            rows3 = rows1_old;
            const float* S2 = src + (sy + 1) * in_w;    // src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w;    // src.row(sy+2);

            const float* alphap = alpha;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else if (sy == prev_sy1 + 3)
        {
            // hresize three rows
            float* rows0_old = rows0;
            float* rows1_old = rows1;
            float* rows2_old = rows2;
            rows0 = rows3;
            rows1 = rows0_old;
            rows2 = rows1_old;
            rows3 = rows2_old;
            const float* S1 = src + sy * in_w;          // src.row(sy);
            const float* S2 = src + (sy + 1) * in_w;    // src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w;    // src.row(sy+2);

            const float* alphap = alpha;
            float* rows1p = rows1;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S1p = S1 + sx;
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows1p[dx] = S1p[-1] * a0 + S1p[0] * a1 + S1p[1] * a2 + S1p[2] * a3;
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else
        {
            // hresize four rows
            const float* S0 = src + (sy - 1) * in_w;    // src.row(sy-1);
            const float* S1 = src + sy * in_w;          // src.row(sy);
            const float* S2 = src + (sy + 1) * in_w;    // src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w;    // src.row(sy+2);

            const float* alphap = alpha;
            float* rows0p = rows0;
            float* rows1p = rows1;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S0p = S0 + sx;
                const float* S1p = S1 + sx;
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows0p[dx] = S0p[-1] * a0 + S0p[0] * a1 + S0p[1] * a2 + S0p[2] * a3;
                rows1p[dx] = S1p[-1] * a0 + S1p[0] * a1 + S1p[1] * a2 + S1p[2] * a3;
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }

        prev_sy1 = sy;

        // vresize: 4-tap vertical blend of the cached rows
        float b0 = beta[0];
        float b1 = beta[1];
        float b2 = beta[2];
        float b3 = beta[3];

        float* rows0p = rows0;
        float* rows1p = rows1;
        float* rows2p = rows2;
        float* rows3p = rows3;
        float* Dp = dst + dy * out_w;    // dst.row(dy);
        for (int dx = 0; dx < w; dx++)
        {
            *Dp++ = *rows0p++ * b0 + *rows1p++ * b1 + *rows2p++ * b2 + *rows3p++ * b3;
        }

        beta += 4;
    }

    sys_free(rowsbuf0);
    sys_free(rowsbuf1);
    sys_free(rowsbuf2);
    sys_free(rowsbuf3);
}

/* Entry point: resize input_tensor into output_tensor per interp_param.
 * resize_type: 1 = nearest, 2 = bilinear, 3 = bicubic.
 * If output_width/height are 0 they are derived from the scales.
 * Channels are processed independently and parallelized with OpenMP. */
int interp_run(struct tensor* output_tensor, struct tensor* input_tensor, struct interp_param* interp_param,
               int num_thread)
{
    int resize_type = interp_param->resize_type;
    int out_w = interp_param->output_width;
    int out_h = interp_param->output_height;
    float width_scale = interp_param->width_scale;
    float height_scale = interp_param->height_scale;

    /* assumes NCHW layout when dim_num >= 4 -- dims[1..3] = C,H,W */
    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    float* data = ( float* )input_tensor->data;
    float* out_data = ( float* )output_tensor->data;

    if (out_h == 0 || out_w == 0)
    {
        out_h = in_h * height_scale;
        out_w = in_w * width_scale;
    }
    if (out_h == in_h && out_w == in_w)
    {
        /* NOTE(review): this only repoints a local variable; nothing is
         * copied into output_tensor->data, so the output buffer is left
         * untouched on the identity-size path. Looks like it should memcpy
         * the input into the output instead -- confirm against callers
         * (they may alias the buffers for in-place runs). */
        out_data = data;
        return 0;
    }

    int out_channel_size = out_h * out_w;
    int in_channel_size = in_h * in_w;

    if (input_tensor->dim_num == 1)
    {
        /* 1-D input: broadcast each scalar over a full output plane */
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < input_tensor->dims[0]; ++q)
        {
            for (int i = 0; i < out_h * out_w; i++)
            {
                out_data[q * out_h * out_w + i] = data[q];
            }
        }
        return 0;
    }

    if (resize_type == 1)    // nearest
    {
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; q++)
        {
            for (int y = 0; y < out_h; ++y)
            {
                const int in_y = MIN(( int )(y / height_scale), (in_h - 1));
                for (int x = 0; x < out_w; ++x)
                {
                    const int in_x = MIN(( int )(x / width_scale), (in_w - 1));
                    out_data[out_w * y + x + out_w * out_h * q] = data[in_y * in_w + in_x + q * in_w * in_h];
                }
            }
        }
    }
    else if (resize_type == 2)    // bilinear
    {
        /* one shared workspace: xofs | yofs | alpha (2 floats/col) | beta
         * (2 floats/row); float slots are stored in int-sized cells --
         * assumes sizeof(int) == sizeof(float), as the original did */
        int* buf = ( int* )sys_malloc((out_w + out_h + out_w * 2 + out_h * 2) * sizeof(int));

        int* xofs = buf;              // new int[ow];
        int* yofs = buf + out_w;      // new int[oh];
        float* alpha = ( float* )(buf + out_w + out_h);                // new float[ow * 2];
        float* beta = ( float* )(buf + out_w + out_h + out_w * 2);     // new float[oh * 2];

        linear_coeffs(in_w, out_w, xofs, alpha);
        linear_coeffs(in_h, out_h, yofs, beta);

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; ++q)
        {
            resize_bilinear_image(data + in_channel_size * q, out_data + out_channel_size * q, alpha, xofs, beta, yofs,
                                  out_h, out_w, in_h, in_w);
        }

        sys_free(buf);
    }
    else if (resize_type == 3)    // bicubic
    {
        /* workspace layout as above but with 4 weights per column/row */
        int* buf = ( int* )sys_malloc((out_w + out_h + out_w * 4 + out_h * 4) * sizeof(int));

        int* xofs = buf;              // new int[ow];
        int* yofs = buf + out_w;      // new int[oh];
        float* alpha = ( float* )(buf + out_w + out_h);                // new float[ow * 4];
        float* beta = ( float* )(buf + out_w + out_h + out_w * 4);     // new float[oh * 4];

        cubic_coeffs(in_w, out_w, xofs, alpha);
        cubic_coeffs(in_h, out_h, yofs, beta);

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; q++)
        {
            resize_bicubic_image(data + in_channel_size * q, out_data + out_channel_size * q, alpha, xofs, beta, yofs,
                                 out_h, out_w, in_h, in_w);
        }

        sys_free(buf);
        return 0;
    }

    return 0;
}
constCurvModel2d.c
/* Include constant curvature drivers for all dimensions here */

/* System headers */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Local headers */
#include "QSSLIB_config.h"
#include "qss_options.h"
#include "qss_spatial_derivatives2d.h"
#include "qss_tvd_runge_kutta2d.h"
#include "qss_data_arrays.h"
#include "qss_util2d.h"
#include "qss_macros.h"
#include "qss_reinitialization2d.h"
#include "qss_grid.h"
#include "constCurvModel2d.h"
#include "qss_general_util.h"
#include "connectivity.h"

/* Main driver for constant curvature level set method model.
 *
 * Evolves p->phi under the RHS assembled by QSS2D_GET_RHS_SECOND (normal
 * velocity + curvature + external velocity terms) with forward-Euler (RK1)
 * sub-steps of an adaptive dt, imposing p->mask / p->mask_w / p->mask_nw
 * after every step and reinitializing the level set after each outer step.
 * The inner time loop is parallelized by slicing the y-range of the fill box
 * across OpenMP threads; each thread works its slice between barriers.
 *
 * Returns the simulated time t at which a stopping criterion fired
 * (t >= tmax, max-norm change <= eps_stop, NWP volume ~ 0, or volume change
 * stagnated).
 *
 * NOTE(review): fp_out and a0 are accepted but never used in this body;
 * idx and mask_sign are declared but unused -- confirm they are vestigial. */
QSSLIB_REAL constCurvModel2d(Options *options,QSS_DataArrays *p, Grid *g, FILE *fp_out,
    QSSLIB_REAL a0)
{
    QSSLIB_REAL zero = 0.0;
    char fname[256];
    int flag = 0, OUTER_STEP = 0, INNER_STEP, idx;
    QSSLIB_REAL dt = 0, dt_sub, t = 0;
    QSSLIB_REAL mask_sign = -1;
    QSSLIB_REAL eps, cur_max_H_over_dX = -1, cfl_number = 0.9;
    QSSLIB_REAL max_abs_err = 100, vol_phi = 100, vol_phi_old, vol_very_small;

    /* "empty" threshold: one grid cell's worth of area */
    vol_very_small = (g->dx)[0]*(g->dx)[1];
    /* smoothing width for the regularized Heaviside used in volume integrals */
    eps = (options->eps_coefficient)*(g->dx[0]);

    /* initial volume of the phi < 0 region */
    QSS2D_VOLUME_REGION_PHI_LESS_THAN_ZERO(&vol_phi, p->phi, GB_DIMS_2D,
        FB_DIMS_2D, &(g->dx[0]),&(g->dx[1]), &eps);

    while( (t < options->tmax) && (max_abs_err > options->eps_stop) &&
           (vol_phi > vol_very_small)) {
        /* outer loop */
        OUTER_STEP++;
        dt_sub = 0;
        /* snapshot phi so the outer-step change can be measured below */
        COPY_DATA(p->phi_prev,p->phi,g)
        vol_phi_old = vol_phi;

        /* Begin parallel region */
        #pragma omp parallel default(none) shared(p, g, flag, cfl_number,\
        cur_max_H_over_dX, zero, dt_sub, vol_phi, max_abs_err, eps, options) \
        private(INNER_STEP, dt)
        {
            INNER_STEP = 0;
            QSSLIB_REAL max_H_over_dX;          /* per-thread CFL candidate */
            int bdry_location_idx = 9;          /* all boundaries */
            QSSLIB_REAL disconn_overlap = 0;

            /* Set up variables for multi-threading: each thread owns a
               contiguous slice [cur_jlo_fb, cur_jhi_fb] of the fill box */
            int cur_thread, cur_jlo_fb, cur_jhi_fb, num_threads, nslices, i;
            int cur_jlo_gb, cur_jhi_gb;

            cur_thread = omp_get_thread_num();
            num_threads = omp_get_num_threads();

            if (num_threads > 1)
                flag = 1;

            nslices = g->jhi_fb - g->jlo_fb + 1;
            cur_jlo_fb = g->jlo_fb + nslices*cur_thread/num_threads;
            cur_jhi_fb = g->jlo_fb + nslices*(cur_thread + 1)/num_threads - 1;

            double t1 = omp_get_wtime();

            /* Keeping track of thread-local ghost boundaries, mainly for
               imposing mask: only the first/last thread extends its slice by
               3 ghost cells into the global ghost box */
            if (cur_jhi_fb > (g->jhi_fb))
                cur_jhi_fb = (g->jhi_fb);

            if (cur_thread == 0)
                cur_jlo_gb = cur_jlo_fb - 3;
            else
                cur_jlo_gb = cur_jlo_fb;

            if (cur_thread == (num_threads - 1))
                cur_jhi_gb = cur_jhi_fb + 3;
            else
                cur_jhi_gb = cur_jhi_fb;

            while( dt_sub < options->tplot ) {
                /* inner loop */
                INNER_STEP++;

                /* assemble the level-set RHS on this thread's slice and get
                   its local CFL bound */
                QSS2D_GET_RHS_SECOND(p->phi, p->normal_velocity,
                    p->curvature_coeff, p->external_velocity_x,
                    p->external_velocity_y, &(max_H_over_dX), p->lse_rhs,
                    GB_DIMS_2D, FB_DIMS_PAR_2D, &((g->dx)[0]),&((g->dx)[1]));

                #pragma omp barrier
                /* reduce the per-thread CFL bounds to a global maximum */
                #pragma omp critical
                {
                    if(max_H_over_dX > cur_max_H_over_dX)
                        cur_max_H_over_dX = max_H_over_dX;
                }

                /* Barrier to ensure same cur_max_H_over_dX across all
                   threads. */
                #pragma omp barrier

                /* get final correct dt due to parabolic (curvature) term */
                dt = cfl_number / (cur_max_H_over_dX + options->b_max_over_dx
                    + options->max_U_over_dx);

                /* forward-Euler update of phi on this slice */
                QSS2D_RK1_STEP(p->phi_next,GB_DIMS_2D,p->phi,GB_DIMS_2D,
                    p->lse_rhs, GB_DIMS_2D, FB_DIMS_PAR_2D, &dt);

                #pragma omp barrier

                /* clip against the pore-space mask, then against the trapped
                   wetting / non-wetting component masks (zero overlap) */
                IMPOSE_MASK_PAR_2D(p->phi, p->mask, p->phi_next,
                    &(options->overlap), GB_DIMS_2D, &(cur_jlo_gb),
                    &(cur_jhi_gb));
                #pragma omp barrier

                IMPOSE_MASK_PAR_2D(p->phi, p->mask_w, p->phi,
                    &(disconn_overlap), GB_DIMS_2D, &(cur_jlo_gb),
                    &(cur_jhi_gb));
                #pragma omp barrier

                IMPOSE_MASK_PAR_2D(p->phi, p->mask_nw, p->phi,
                    &(disconn_overlap), GB_DIMS_2D, &(cur_jlo_gb),
                    &(cur_jhi_gb));
                #pragma omp barrier

                /* boundary conditions */
                #pragma omp single
                {
                    signedLinearExtrapolationBCqss(p->phi,g,bdry_location_idx);
                    /* reset the CFL accumulators for the next sub-step */
                    cur_max_H_over_dX = -1;
                    max_H_over_dX = -1;
                }
                /* Implicit barrier after omp single, so all threads should
                   sync here */

                #pragma omp single
                {
                    SET_DATA_TO_CONSTANT(p->lse_rhs,g,zero);
                    dt_sub += dt;
                }
            } /* End inner loop */

            double t2 = omp_get_wtime();
            if (cur_thread == 0)
                printf("%d threads: Level set time = %lf\n", num_threads,
                    t2 - t1);
        } /* End parallel region */

        /* Reinitialization of the level set function - may want to
           parallelize the following functions later */
        t += dt_sub;

        double t3 = omp_get_wtime();

        if(options->check_connectivity)
        //if (((int)t % 1) == 0)
            trapComponents_mask(p, g, options);

        printf("Reinitializing....");
        reinitialize2d_subcell_fix_qss(p,g,options);
        printf("Reinitialized\n");

        /* compute stopping criteria */
        /* max abs error */
        QSS2D_MAX_NORM_DIFF_LOCAL(&max_abs_err,p->phi,GB_DIMS_2D, p->phi_prev,
            GB_DIMS_2D, FB_DIMS_2D, &(options->err_check_zone));

        /* NOTE(review): this local declaration shadows the function-scope
           vol_phi_old assigned at the top of the outer loop -- the outer
           assignment is therefore dead; confirm the shadowing is intended */
        QSSLIB_REAL vol_phi_old = vol_phi;
        QSS2D_VOLUME_REGION_PHI_LESS_THAN_ZERO(&vol_phi, p->phi, GB_DIMS_2D,
            FB_DIMS_2D, &(g->dx[0]),&(g->dx[1]), &eps);

        /* NOTE(review): fabsf takes/returns float; if QSSLIB_REAL is double
           this silently narrows -- fabs may be intended */
        if (fabsf(vol_phi - vol_phi_old) < 0.1*options->eps_stop)
            break;

        if(options->checkpoint)
        {
            /* dump current state so the run can be resumed */
            sprintf(fname,"checkpoint_phi");
            writeDataArrayQSS(p->phi,g,fname,GZIP);
            sprintf(fname,"checkpoint_phi_prev");
            writeDataArrayQSS(p->phi_prev,g,fname,GZIP);
            sprintf(fname,"mask_w");
            writeDataArrayQSS(p->mask_w,g,fname,GZIP);
            sprintf(fname,"mask_nw");
            writeDataArrayQSS(p->mask_nw,g,fname,GZIP);
        }

        double t4 = omp_get_wtime();
        printf("connectivity time = %lf\n", t4 - t3);

        printf("t = %f\t",t);
        printf("max_abs_err = %4.3f,\t",max_abs_err);
        printf("vol_nwp = %4.3f\n",vol_phi);

        /* If nw phase saturation isn't changing much, then continue on */
        //if ((vol_phi - vol_phi_old < options->eps_stop) && (max_abs_err < 0.1))
        //    break;

    } /* End outer loop */

    /* Merge disconnected components for writing to saved data */
    MERGE_SETS(p->phi, p->mask_nw, g);

    return t;
}
hw2b_time(dynamic).c
#ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #define PNG_NO_SETJMP #include <sched.h> #include <assert.h> #include <png.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include <mpi.h> #include <pthread.h> void write_png(const char* filename, int iters, int width, int height, const int* buffer) { FILE* fp = fopen(filename, "wb"); assert(fp); png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); assert(png_ptr); png_infop info_ptr = png_create_info_struct(png_ptr); assert(info_ptr); png_init_io(png_ptr, fp); png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT); png_set_filter(png_ptr, 0, PNG_NO_FILTERS); png_write_info(png_ptr, info_ptr); png_set_compression_level(png_ptr, 1); size_t row_size = 3 * width * sizeof(png_byte); png_bytep row = (png_bytep)malloc(row_size); for (int y = 0; y < height; ++y) { memset(row, 0, row_size); for (int x = 0; x < width; ++x) { int p = buffer[(height - 1 - y) * width + x]; png_bytep color = row + x * 3; if (p != iters) { if (p & 16) { color[0] = 240; color[1] = color[2] = p % 16 * 16; } else { color[0] = p % 16 * 16; } } } png_write_row(png_ptr, row); } free(row); png_write_end(png_ptr, NULL); png_destroy_write_struct(&png_ptr, &info_ptr); fclose(fp); } int main(int argc, char** argv) { int rank, size; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); /* argument parsing */ assert(argc == 9); const char* filename = argv[1]; int iters = strtol(argv[2], 0, 10); double left = strtod(argv[3], 0); double right = strtod(argv[4], 0); double lower = strtod(argv[5], 0); double upper = strtod(argv[6], 0); int width = strtol(argv[7], 0, 10); int height = strtol(argv[8], 0, 10); /* allocate memory for image */ int* image = (int*)malloc(width * height * sizeof(int)); int* result = (int*)malloc(width * height * sizeof(int)); double start = 
MPI_Wtime(); #pragma omp parallel for schedule(dynamic) /* mandelbrot set */ for (int j = rank; j < height; j += size) { double y0 = j * ((upper - lower) / height) + lower; for (int i = 0; i < width; ++i) { double x0 = i * ((right - left) / width) + left; int repeats = 0; double x = 0; double y = 0; double length_squared = 0; while (repeats < iters && length_squared < 4) { double temp = x * x - y * y + x0; y = 2 * x * y + y0; x = temp; length_squared = x * x + y * y; ++repeats; } image[j * width + i] = repeats; } } double end = MPI_Wtime(); MPI_Reduce(image, result, width * height, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); printf("%f\n", end - start); if (rank == 0){ /* draw and cleanup */ write_png(filename, iters, width, height, result); free(image); } MPI_Finalize(); }
WaveFunctionComponent.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2020 QMCPACK developers. // // File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign // Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory // Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign // Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign // Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory // Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory // // File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign ////////////////////////////////////////////////////////////////////////////////////// #ifndef QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H #define QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H #include "Message/Communicate.h" #include "Configuration.h" #include "Particle/ParticleSet.h" #include "Particle/VirtualParticleSet.h" #include "Particle/DistanceTableData.h" #include "OhmmsData/RecordProperty.h" #include "QMCWaveFunctions/OrbitalSetTraits.h" #include "Particle/MCWalkerConfiguration.h" #include "type_traits/template_types.hpp" #ifdef QMC_CUDA #include "type_traits/CUDATypes.h" #endif /**@file WaveFunctionComponent.h *@brief Declaration of WaveFunctionComponent */ namespace qmcplusplus { #ifdef QMC_CUDA struct NLjob { int walker; int elec; int numQuadPoints; NLjob(int w, int e, int n) : walker(w), elec(e), numQuadPoints(n) {} }; #endif ///forward declaration of WaveFunctionComponent struct WaveFunctionComponent; ///forward declaration of DiffWaveFunctionComponent struct DiffWaveFunctionComponent; typedef WaveFunctionComponent* WaveFunctionComponentPtr; typedef DiffWaveFunctionComponent* DiffWaveFunctionComponentPtr; 
/**@defgroup WaveFunctionComponent group * @brief Classes which constitute a many-body trial wave function * * A many-body trial wave function is * \f[ \Psi(\{ {\bf R}\}) = \prod_i \psi_{i}(\{ {\bf R}\}), * \f] * where \f$\Psi\f$s are represented by * the derived classes from WaveFunctionComponent. */ /** @ingroup WaveFunctionComponent * @brief An abstract class for a component of a many-body trial wave function * * mw_ prefix is a function name signature indicating it is for handling a batch of WaveFunctionComponent objects * which are required to be base class pointers of the same derived class type. * all the mw_ routines must be implemented in a way either stateless or maintains states of every walker. */ struct WaveFunctionComponent : public QMCTraits { /** enum for a update mode */ enum { ORB_PBYP_RATIO, /*!< particle-by-particle ratio only */ ORB_PBYP_ALL, /*!< particle-by-particle, update Value-Gradient-Laplacian */ ORB_PBYP_PARTIAL, /*!< particle-by-particle, update Value and Grdient */ ORB_WALKER, /*!< walker update */ ORB_ALLWALKER /*!< all walkers update */ }; typedef ParticleAttrib<ValueType> ValueVectorType; typedef ParticleAttrib<GradType> GradVectorType; typedef ParticleSet::Walker_t Walker_t; typedef Walker_t::WFBuffer_t WFBufferType; typedef Walker_t::Buffer_t BufferType; typedef OrbitalSetTraits<RealType>::ValueMatrix_t RealMatrix_t; typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t; typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t; typedef OrbitalSetTraits<ValueType>::HessType HessType; typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t; // the value type for log(psi) using LogValueType = std::complex<QTFull::RealType>; // the value type for psi(r')/psi(r) using PsiValueType = QTFull::ValueType; /** flag to set the optimization mode */ bool IsOptimizing; /** boolean to set optimization * * If true, this object is actively modified during optimization */ bool Optimizable; /** true, if this component is 
fermionic */ bool is_fermionic; /** current update mode */ int UpdateMode; /** current \f$\log\phi \f$ */ LogValueType LogValue; /** Pointer to the differential WaveFunctionComponent of this object * * If dPsi=0, this WaveFunctionComponent is constant with respect to the optimizable variables */ DiffWaveFunctionComponentPtr dPsi; /** A vector for \f$ \frac{\partial \nabla \log\phi}{\partial \alpha} \f$ */ GradVectorType dLogPsi; /** A vector for \f$ \frac{\partial \nabla^2 \log\phi}{\partial \alpha} \f$ */ ValueVectorType d2LogPsi; /** Name of the class derived from WaveFunctionComponent */ const std::string ClassName; /** Name of the object * It is required to be different for objects of the same derived type like multiple J1. * It can be left empty for object which is unique per many-body WF. */ const std::string myName; ///list of variables this WaveFunctionComponent handles opt_variables_type myVars; ///Bytes in WFBuffer size_t Bytes_in_WFBuffer; /// default constructor WaveFunctionComponent(const std::string& class_name, const std::string& obj_name = ""); ///default destructor virtual ~WaveFunctionComponent() {} inline void setOptimizable(bool optimizeit) { Optimizable = optimizeit; } ///assign a differential WaveFunctionComponent virtual void setDiffOrbital(DiffWaveFunctionComponentPtr d); ///assembles the full value PsiValueType getValue() const { return LogToValue<PsiValueType>::convert(LogValue); } /** check in optimizable parameters * @param active a super set of optimizable variables * * Add the paramemters this WaveFunctionComponent manage to active. 
*/ virtual void checkInVariables(opt_variables_type& active) = 0; /** check out optimizable variables * * Update myVars index map */ virtual void checkOutVariables(const opt_variables_type& active) = 0; /** reset the parameters during optimizations */ virtual void resetParameters(const opt_variables_type& active) = 0; /** print the state, e.g., optimizables */ virtual void reportStatus(std::ostream& os) = 0; /** evaluate the value of the WaveFunctionComponent from scratch * @param P active ParticleSet * @param G Gradients, \f$\nabla\ln\Psi\f$ * @param L Laplacians, \f$\nabla^2\ln\Psi\f$ * @return the log value * * Mainly for walker-by-walker move. The initial stage of particle-by-particle * move also uses this. */ virtual LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L) = 0; /** evaluate from scratch the same type WaveFunctionComponent of multiple walkers * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param G_list the list of Gradients pointers in a walker batch, \f$\nabla\ln\Psi\f$ * @param L_list the list of Laplacians pointers in a walker batch, \f$\nabla^2\ln\Psi\f$ * @@param values the log WF values of walkers in a batch */ virtual void mw_evaluateLog(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, const RefVector<ParticleSet::ParticleGradient_t>& G_list, const RefVector<ParticleSet::ParticleLaplacian_t>& L_list) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) WFC_list[iw].get().evaluateLog(P_list[iw], G_list[iw], L_list[iw]); } /** recompute the value of the WaveFunctionComponents which require critical accuracy. 
* needed for Slater Determinants but not needed for most types of WaveFunctionComponents */ virtual void recompute(ParticleSet& P) {} // virtual void evaluateHessian(ParticleSet& P, IndexType iat, HessType& grad_grad_psi) // { // APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented"); // } virtual void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi_all) { APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented in " + ClassName + " class."); } /** return the current gradient for the iat-th particle * @param P quantum particle set * @param iat particle index * @return the gradient of the iat-th particle */ virtual GradType evalGrad(ParticleSet& P, int iat) { APP_ABORT("WaveFunctionComponent::evalGradient is not implemented in " + ClassName + " class."); return GradType(); } /** return the current spin gradient for the iat-th particle * Default implementation assumes that WaveFunctionComponent does not explicitly depend on Spin. * @param P quantum particle set * @param iat particle index * @return the spin gradient of the iat-th particle */ virtual GradType evalGradWithSpin(ParticleSet& P, int iat, ComplexType& spingrad) { return evalGrad(P, iat); } /** compute the current gradients for the iat-th particle of multiple walkers * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param iat particle index * @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$ */ virtual void mw_evalGrad(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, int iat, std::vector<GradType>& grad_now) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) grad_now[iw] = WFC_list[iw].get().evalGrad(P_list[iw].get(), iat); } /** return the logarithmic gradient for the iat-th particle * of the source particleset * @param Pquantum particle set * @param iat particle 
index * @return the gradient of the iat-th particle */ virtual GradType evalGradSource(ParticleSet& P, ParticleSet& source, int iat) { // unit_test_hamiltonian calls this function incorrectly; do not abort for now // APP_ABORT("WaveFunctionComponent::evalGradSource is not implemented"); return GradType(); } /** Adds the gradient w.r.t. the iat-th particle of the * source particleset (ions) of the logarithmic gradient * and laplacian w.r.t. the target paritlceset (electrons). * @param P quantum particle set (electrons) * @param source classical particle set (ions) * @param iat particle index of source (ion) * @param the ion gradient of the elctron gradient * @param the ion gradient of the elctron laplacian. * @return the log gradient of psi w.r.t. the source particle iat */ virtual GradType evalGradSource(ParticleSet& P, ParticleSet& source, int iat, TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad, TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad) { return GradType(); } /** evaluate the ratio of the new to old WaveFunctionComponent value and the new gradient * @param P the active ParticleSet * @param iat the index of a particle * @param grad_iat Gradient for the active particle */ virtual PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat); virtual void ratioGradAsync(ParticleSet& P, int iat, PsiValueType& ratio, GradType& grad_iat); /** evaluate the ratio of the new to old WaveFunctionComponent value and the new spin gradient * Default implementation assumes that WaveFunctionComponent does not explicitly depend on Spin. 
* @param P the active ParticleSet * @param iat the index of a particle * @param grad_iat realspace gradient for the active particle * @param spingrad_iat spin gradient for the active particle */ virtual PsiValueType ratioGradWithSpin(ParticleSet& P, int iat, GradType& grad_iat, ComplexType& spingrad_iat) { return ratioGrad(P, iat, grad_iat); } /** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param iat particle index * @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$ * @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$ */ virtual void mw_ratioGrad(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, int iat, std::vector<PsiValueType>& ratios, std::vector<GradType>& grad_new); virtual void mw_ratioGradAsync(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, int iat, std::vector<PsiValueType>& ratios, std::vector<GradType>& grad_new); /** a move for iat-th particle is accepted. Update the current content. * @param P target ParticleSet * @param iat index of the particle whose new position was proposed * @param safe_to_delay if true, delayed accept is safe. */ virtual void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false) = 0; /** moves of the iat-th particle on some walkers in a batch is accepted. Update the current content. * Note that all the lists only include accepted walkers. * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param iat particle index * @param safe_to_delay if true, delayed accept is safe. 
*/ virtual void mw_accept_rejectMove(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, int iat, const std::vector<bool>& isAccepted, bool safe_to_delay = false) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) if (isAccepted[iw]) WFC_list[iw].get().acceptMove(P_list[iw], iat, safe_to_delay); else WFC_list[iw].get().restore(iat); } /** complete all the delayed updates, must be called after each substep or step during pbyp move */ virtual void completeUpdates() {} /** complete all the delayed updates for all the walkers in a batch * must be called after each substep or step during pbyp move */ virtual void mw_completeUpdates(const RefVector<WaveFunctionComponent>& WFC_list) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) WFC_list[iw].get().completeUpdates(); } /** If a move for iat-th particle is rejected, restore to the content. * @param iat index of the particle whose new position was proposed * * Ye: hopefully we can gradually move away from restore */ virtual void restore(int iat) = 0; /** evaluate the ratio of the new to old WaveFunctionComponent value * @param P the active ParticleSet * @param iat the index of a particle * @return \f$ \psi( \{ {\bf R}^{'} \} )/ \psi( \{ {\bf R}\})\f$ * * Specialized for particle-by-particle move */ virtual PsiValueType ratio(ParticleSet& P, int iat) = 0; /** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param iat particle index * @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$ */ virtual void mw_calcRatio(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, int iat, std::vector<PsiValueType>& ratios) { #pragma omp parallel for for (int iw = 0; 
iw < WFC_list.size(); iw++) ratios[iw] = WFC_list[iw].get().ratio(P_list[iw], iat); } /** For particle-by-particle move. Requests space in the buffer * based on the data type sizes of the objects in this class. * @param P particle set * @param buf Anonymous storage */ virtual void registerData(ParticleSet& P, WFBufferType& buf) = 0; /** For particle-by-particle move. Requests space in the buffer * based on the data type sizes of the objects in this class. * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param buf_list Anonymous storage */ virtual void mw_registerData(const std::vector<WaveFunctionComponent*>& WFC_list, const std::vector<ParticleSet*>& P_list, const RefVector<WFBufferType>& buf_list) { // We can't make this static but we can use a lambda with no capture to // restrict access to *this scope auto registerComponentData = [](WaveFunctionComponent& wfc, ParticleSet& pset, WFBufferType& wfb) { wfc.registerData(pset, wfb); }; for (int iw = 0; iw < WFC_list.size(); iw++) registerComponentData(*(WFC_list[iw]), *(P_list[iw]), buf_list[iw]); } /** For particle-by-particle move. Put the objects of this class * in the walker buffer or forward the memory cursor. * @param P particle set * @param buf Anonymous storage * @param fromscratch request recomputing the precision critical * pieces of wavefunction from scratch * @return log value of the wavefunction. */ virtual LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) = 0; /** For particle-by-particle move. Put the objects of this class * in the walker buffer or forward the memory cursor. 
* @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch * @param P_list the list of ParticleSet pointers in a walker batch * @param buf_list Anonymous storage * @@param values the log WF values of walkers in a batch * @param fromscratch request recomputing the precision critical * pieces of wavefunction from scratch */ virtual void mw_updateBuffer(const RefVector<WaveFunctionComponent>& WFC_list, const RefVector<ParticleSet>& P_list, const RefVector<WFBufferType>& buf_list, bool fromscratch = false) { #pragma omp parallel for for (int iw = 0; iw < WFC_list.size(); iw++) WFC_list[iw].get().updateBuffer(P_list[iw], buf_list[iw], fromscratch); } /** For particle-by-particle move. Copy data or attach memory * from a walker buffer to the objects of this class. * The log value, P.G and P.L contribution from the objects * of this class are also added. * @param P particle set * @param buf Anonymous storage */ virtual void copyFromBuffer(ParticleSet& P, WFBufferType& buf) = 0; /** For particle-by-particle move. Copy data or attach memory * from a walker buffer to the objects of this class. 
* @param P particle set * @param buf Anonymous storage */ virtual void mw_copyFromBuffer(const RefVector<WaveFunctionComponent>& wfc_list, const RefVector<ParticleSet>& p_list, const RefVector<WFBufferType>& buf_list) { #pragma omp parallel for for (int iw = 0; iw < wfc_list.size(); iw++) wfc_list[iw].get().copyFromBuffer(p_list[iw], buf_list[iw]); } /** make clone * @param tqp target Quantum ParticleSet * @param deepcopy if true, make a decopy * * If not true, return a proxy class */ virtual WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const; /** Intended as a handle to break * * */ //virtual WaveFunctionComponentPtr makeThrScope(std::vector<std::pair<int,int>>& ptcl_group_indexes) const = 0; /** Return the Chiesa kinetic energy correction */ virtual RealType KECorrection(); /** Compute derivatives of the wavefunction with respect to the optimizable * parameters. * @param P particle set * @param optvars optimizable parameters * @param dlogpsi array of derivatives of the log of the wavefunction * @param dhpsioverpsi array of derivatives of the Laplacian of the wavefunction divided by the wavefunction. * Note that this does not use the Laplacian of the log of the wavefunction, as in evaluateLog. * Also the factor of -1/2 from the kinetic energy must be included here. The 1/m * factor is applied in TrialWaveFunction. */ virtual void evaluateDerivatives(ParticleSet& P, const opt_variables_type& optvars, std::vector<ValueType>& dlogpsi, std::vector<ValueType>& dhpsioverpsi); /** Compute derivatives of rhe wavefunction with respect to the optimizable * parameters * @param P particle set * @param optvars optimizable parameters * @param dlogpsi array of derivatives of the log of the wavefunction * Note: this function differs from the evaluateDerivatives function in the way that it only computes * the derivative of the log of the wavefunction. 
*/ virtual void evaluateDerivativesWF(ParticleSet& P, const opt_variables_type& optvars, std::vector<ValueType>& dlogpsi); virtual void multiplyDerivsByOrbR(std::vector<ValueType>& dlogpsi) { RealType myrat = std::real(LogToValue<PsiValueType>::convert(LogValue)); for (int j = 0; j < myVars.size(); j++) { int loc = myVars.where(j); dlogpsi[loc] *= myrat; } } /** Calculates the derivatives of \f$ \grad(\textrm{log}(\psif)) \f$ with respect to the optimizable parameters, and the dot product of this is then performed with the passed-in G_in gradient vector. This object is then returned as dgradlogpsi. */ virtual void evaluateGradDerivatives(const ParticleSet::ParticleGradient_t& G_in, std::vector<ValueType>& dgradlogpsi) { APP_ABORT("Need specialization of WaveFunctionComponent::evaluateGradDerivatives in " + ClassName + " class.\n"); } virtual void finalizeOptimization() {} /** evaluate the ratios of one virtual move with respect to all the particles * @param P reference particleset * @param ratios \f$ ratios[i]=\{{\bf R}\}\rightarrow {r_0,\cdots,r_i^p=pos,\cdots,r_{N-1}}\f$ */ virtual void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios); /** evaluate ratios to evaluate the non-local PP * @param VP VirtualParticleSet * @param ratios ratios with new positions VP.R[k] the VP.refPtcl */ virtual void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios); /** evaluate ratios to evaluate the non-local PP multiple walkers * @param wfc_list the list of WaveFunctionComponent references of the same component in a walker batch * @param vp_list the list of VirtualParticleSet references in a walker batch * @param ratios of all the virtual moves of all the walkers */ virtual void mw_evaluateRatios(const RefVector<WaveFunctionComponent>& wfc_list, const RefVector<const VirtualParticleSet>& vp_list, std::vector<std::vector<ValueType>>& ratios) { #pragma omp parallel for for (int iw = 0; iw < wfc_list.size(); iw++) 
wfc_list[iw].get().evaluateRatios(vp_list[iw], ratios[iw]); } /** evaluate ratios to evaluate the non-local PP * @param VP VirtualParticleSet * @param ratios ratios with new positions VP.R[k] the VP.refPtcl * @param dratios \f$\partial_{\alpha}(\ln \Psi ({\bf R}^{\prime}) - \ln \Psi ({\bf R})) \f$ */ virtual void evaluateDerivRatios(VirtualParticleSet& VP, const opt_variables_type& optvars, std::vector<ValueType>& ratios, Matrix<ValueType>& dratios); ///////////////////////////////////////////////////// // Functions for vectorized evaluation and updates // ///////////////////////////////////////////////////// #ifdef QMC_CUDA using CTS = CUDAGlobalTypes; virtual void freeGPUmem() {} virtual void recompute(MCWalkerConfiguration& W, bool firstTime) {} virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool, int kblocksize) {} /** Evaluate the log of the WF for all walkers * @param walkers vector of all walkers * @param logPsi output vector of log(psi) */ virtual void addLog(MCWalkerConfiguration& W, std::vector<RealType>& logPsi) { APP_ABORT("Need specialization of WaveFunctionComponent::addLog for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } /** Evaluate the wave-function ratio w.r.t. moving particle iat * for all walkers * @param walkers vector of all walkers * @param iat particle which is moving * @param psi_ratios output vector with psi_new/psi_old */ virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios) { APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } // Returns the WF ratio and gradient w.r.t. 
iat for each walker // in the respective vectors virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad) { APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void calcRatio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::calcRatio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void addRatio(MCWalkerConfiguration& W, int iat, int k, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::addRatio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void ratio(std::vector<Walker_t*>& walkers, std::vector<int>& iatList, std::vector<PosType>& rNew, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void addGradient(MCWalkerConfiguration& W, int iat, std::vector<GradType>& grad) { APP_ABORT("Need specialization of WaveFunctionComponent::addGradient for " + ClassName + ".\n Required CUDA functionality not implemented. 
Contact developers.\n"); } virtual void calcGradient(MCWalkerConfiguration& W, int iat, int k, std::vector<GradType>& grad) { APP_ABORT("Need specialization of WaveFunctionComponent::calcGradient for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void gradLapl(MCWalkerConfiguration& W, GradMatrix_t& grads, ValueMatrix_t& lapl) { APP_ABORT("Need specialization of WaveFunctionComponent::gradLapl for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void det_lookahead(MCWalkerConfiguration& W, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad, std::vector<ValueType>& lapl, int iat, int k, int kd, int nw) { APP_ABORT("Need specialization of WaveFunctionComponent::det_lookahead for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void update(MCWalkerConfiguration* W, std::vector<Walker_t*>& walkers, int iat, std::vector<bool>* acc, int k) { APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void update(const std::vector<Walker_t*>& walkers, const std::vector<int>& iatList) { APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void NLratios(MCWalkerConfiguration& W, std::vector<NLjob>& jobList, std::vector<PosType>& quadPoints, std::vector<ValueType>& psi_ratios) { APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName + ".\n Required CUDA functionality not implemented. 
Contact developers.\n"); } virtual void NLratios(MCWalkerConfiguration& W, gpu::device_vector<CUDA_PRECISION*>& Rlist, gpu::device_vector<int*>& ElecList, gpu::device_vector<int>& NumCoreElecs, gpu::device_vector<CUDA_PRECISION*>& QuadPosList, gpu::device_vector<CUDA_PRECISION*>& RatioList, int numQuadPoints) { APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } virtual void evaluateDerivatives(MCWalkerConfiguration& W, const opt_variables_type& optvars, RealMatrix_t& dgrad_logpsi, RealMatrix_t& dhpsi_over_psi) { APP_ABORT("Need specialization of WaveFunctionComponent::evaluateDerivatives for " + ClassName + ".\n Required CUDA functionality not implemented. Contact developers.\n"); } #endif }; } // namespace qmcplusplus #endif
loopct_r4.c
/*
 * De-interleave one page of channel-major data into time-major order.
 *
 * Input layout:  page[(tab*nchannels + channel)*padded_size + time]
 * Output layout: transposed[time*nchannels + (nchannels - channel - 1)]
 *                (frequency order is reversed to comply with the header)
 *
 * ntimes < padded_size. `transposed` holds a single tab and is overwritten
 * for each tab in turn -- the caller is expected to consume each finished
 * tab directly, so the full ntabs-sized array is never built up.
 */
void deinterleave(const char *page, char *transposed, const int ntabs,
    const int nchannels, const int ntimes, const int padded_size) {
  int tab;
  for (tab = 0; tab < ntabs; tab++) {
    /* Bulk of the channels, 4-way unrolled. The original code assumed
     * nchannels % 4 == 0; for other values it read past `page` and wrote
     * at out-of-range `transposed` indices. The unrolled loop now stops
     * at the largest multiple of 4, and a scalar loop handles the rest. */
    const int nch4 = nchannels & ~3;
    int channel;
#pragma omp parallel for
    for (channel = 0; channel < nch4; channel+=4) {
      const char *channelA = &page[(tab*nchannels + channel + 0)*padded_size];
      const char *channelB = &page[(tab*nchannels + channel + 1)*padded_size];
      const char *channelC = &page[(tab*nchannels + channel + 2)*padded_size];
      const char *channelD = &page[(tab*nchannels + channel + 3)*padded_size];

      int time;
      for (time = 0; time < ntimes; time++) {
        // reverse freq order to comply with header
        transposed[time*nchannels+nchannels-(channel+0)-1] = channelA[time];
        transposed[time*nchannels+nchannels-(channel+1)-1] = channelB[time];
        transposed[time*nchannels+nchannels-(channel+2)-1] = channelC[time];
        transposed[time*nchannels+nchannels-(channel+3)-1] = channelD[time];
      }
    }
    /* scalar cleanup for the 1-3 remaining channels when nchannels is not
     * a multiple of 4 (no-op when nchannels % 4 == 0) */
    for (channel = nch4; channel < nchannels; channel++) {
      const char *src = &page[(tab*nchannels + channel)*padded_size];
      int time;
      for (time = 0; time < ntimes; time++) {
        transposed[time*nchannels+nchannels-channel-1] = src[time];
      }
    }
  }
}
omp_task_untied.c
<ompts:test> <ompts:testdescription>Test which checks the untied clause of the omp task directive. The idear of the tests is to generate a set of tasks in a single region. We create more tasks than threads exist, so at least one thread should handle more than one thread. Then we send the half of the threads into a bussy loop. We let finish the other threads. Now we should get rescheduled some untied tasks to the idle threads.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp task untied</ompts:directive> <ompts:dependences>omp single, omp flush</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int <ompts:testcode:functionname>omp_task_untied</ompts:testcode:functionname>(FILE * logFile){ int i; <ompts:orphan:vars> int result = 0; int started = 0; int state_init = 1; int state_run = 1; int num_tasks = 0; int num_threads; int max_num_tasks; int *start_tids; /* array holding for each thread the id of the first executing thread */ </ompts:orphan:vars> #pragma omp parallel { #pragma omp single nowait { num_threads = omp_get_num_threads(); max_num_tasks = num_threads * MAX_TASKS_PER_THREAD; start_tids = (int *) malloc(max_num_tasks * sizeof(int)); for (i = 0; i < max_num_tasks; i++) { start_tids[i] = -1; /* mark as not assigned */ } for (i = 0; i < max_num_tasks; i++) { <ompts:orphan> # pragma omp task <ompts:check>untied</ompts:check> { int task_id; #pragma om atomic task_id = num_tasks; fprintf(logFile, "Generated task %d. Initial thread is %d\n", task_id, omp_get_thread_num()); fflush(logFile); if (start_tids[task_id] == -1) { /* initial thread assignement */ start_tids[task_id] = omp_get_thread_num(); # pragma omp atomic num_tasks++; } else { fprintf(logFile, "Ecountered reassignment of task with task restart.\n"); fflush(logFile); # pragma omp atomic result++; } /* Wait untill all tasks are generated or timeout for initialization is reached. 
* The timeout is needed, as the runtime may only allow a limited number of * tasks to be submitted to the execution queue - which is smaller than the number * of tasks we want to generate. */ while (num_tasks < max_num_tasks && state_init) { my_sleep (SLEEPTIME); # pragma omp flush (num_tasks) # pragma omp flush (state_init) } /* Suspend every second task */ if ((task_id % 2) == 0) { do { int current_tid; my_sleep (SLEEPTIME); current_tid = omp_get_thread_num (); if (current_tid != start_tids[task_id]) { fprintf(logFile, "Ecountered reassignment of task during task execution.\n"); fflush(logFile); # pragma omp atomic result++; break; } # pragma omp flush (state_run) } while (state_run); } } /* end of omp task */ </ompts:orphan> } /* end of for */ /* wait until all tasks have been created and were sheduled at least * a first time or timeout is reached */ while (num_tasks < max_num_tasks && state_init) { my_sleep (SLEEPTIME); # pragma omp flush (num_tasks) # pragma omp flush (state_init) } } /* end of single */ my_sleep(SLEEPTIME_LONG/2); fprintf(logFile, "Timeout init\n"); state_init = 0; # pragma omp flush (state_init) /* wait a little moment more until we stop the test */ my_sleep(SLEEPTIME_LONG/2); fprintf(logFile, "Timeout execution\n"); state_run = 0; # pragma omp flush (state_run) } /* end of parallel */ fprintf(logFile, "Detected %d reassginments of tasks.\n", result); return (result > 0); } </ompts:testcode> </ompts:test>
functions.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
/* Include the project header when available; guarded so the file also builds
 * standalone (the definitions below are strictly ordered top-down). */
#if defined(__has_include)
#if __has_include("functions.h")
#include "functions.h"
#endif
#else
#include "functions.h"
#endif

/* Compute a*b mod p without overflow.
 * BUGFIX: the original accumulated in unsigned int, so (ab + za) could wrap
 * for moduli close to 2^32; the intermediate sums are now done in 64 bits,
 * making the routine safe for any 32-bit modulus. */
unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) {
  unsigned long long za = a;
  unsigned long long ab = 0;
  while (b > 0) {
    if (b % 2 == 1) ab = (ab + za) % p;
    za = (2 * za) % p;
    b /= 2;
  }
  return (unsigned int) ab;
}

/* Compute a^b mod p via square-and-multiply (returns 1 for b == 0). */
unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int z = a;
  unsigned int aExpb = 1;
  while (b > 0) {
    if (b % 2 == 1) aExpb = modprod(aExpb, z, p);
    z = modprod(z, z, p);
    b /= 2;
  }
  return aExpb;
}

/* Returns either 0 or 1 randomly. NOTE: rand() is not thread-safe. */
unsigned int randomBit() {
  return rand() % 2;
}

/* Returns a random integer between 2^{n-1} and 2^{n}. */
unsigned int randXbitInt(unsigned int n) {
  unsigned int r = 1;
  for (unsigned int i = 0; i < n - 1; i++) {
    r = r * 2 + randomBit();
  }
  return r;
}

/* Tests for primality: returns 1 if N is probably prime, 0 if composite.
 * Note the quirk (preserved from the original): all even N, including 2,
 * return 0 -- callers only probe odd candidates. */
unsigned int isProbablyPrime(unsigned int N) {
  if (N % 2 == 0) return 0; // not interested in even numbers (including 2)

  unsigned int NsmallPrimes = 168;
  unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997};

  // before using a probabilistic primality check, check directly using the
  // small primes list (n starts at 1: evens were already rejected above)
  for (unsigned int n = 1; n < NsmallPrimes; n++) {
    if (N == smallPrimeList[n]) return 1;      // true
    if (N % smallPrimeList[n] == 0) return 0;  // false
  }

  // if we're testing a large number switch to Miller-Rabin primality test,
  // using the small primes as witnesses. Write N-1 = 2^r * d with d odd.
  unsigned int r = 0;
  unsigned int d = N - 1;
  while (d % 2 == 0) {
    d /= 2;
    r += 1;
  }
  for (unsigned int n = 0; n < NsmallPrimes; n++) {
    unsigned int k = smallPrimeList[n];
    unsigned int x = modExp(k, d, N);
    if ((x == 1) || (x == N - 1)) continue;
    /* BUGFIX: perform r-1 squarings (i = 1 .. r-1). The original looped
     * i < r-1, i.e. only r-2 squarings, so x could miss reaching N-1 on the
     * final squaring and a prime could be misreported as composite. */
    for (unsigned int i = 1; i < r; i++) {
      x = modprod(x, x, N);
      if (x == 1) return 0;    // nontrivial square root of 1 => composite
      if (x == N - 1) break;
    }
    // see whether we left the loop because x==N-1
    if (x == N - 1) continue;
    return 0;  // false
  }
  return 1;  // true
}

/* Finds a generator of Z_p using the assumption that p = 2*q+1 with q prime:
 * any g with g != 0, g^2 != 1 and g^q != 1 (mod p) generates the group. */
unsigned int findGenerator(unsigned int p) {
  unsigned int g;
  unsigned int q = (p - 1) / 2;
  do {
    // make a random number 1 <= g < p
    g = randXbitInt(32) % p;  // could also have passed n to findGenerator
  } while (g == 0 || (modExp(g, q, p) == 1) || (modExp(g, 2, p) == 1));
  return g;
}

/* Generates ElGamal parameters: safe prime p, generator g, secret key x and
 * public key h = g^x mod p (all returned through the out-pointers). */
void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g,
                  unsigned int *h, unsigned int *x) {
  /* Use isProbablyPrime and randXbitInt to find a new random n-bit prime
   * number which satisfies p = 2*q+1 where q is also prime */
  unsigned int q;
  do {
    *p = randXbitInt(n);
    q = (*p - 1) / 2;
  } while (!isProbablyPrime(*p) || !isProbablyPrime(q));

  /* Use the fact that p=2*q+1 to quickly find a generator */
  *g = findGenerator(*p);

  /* pick a secret key x in [1, p-1].
   * BUGFIX: the original allowed x == 0, which yields the weak key h = 1. */
  do {
    *x = randXbitInt(n) % (*p);
  } while (*x == 0);

  // compute h
  *h = modExp(*g, *x, *p);

  printf("ElGamal Setup successful.\n");
  printf("p = %u. \n", *p);
  printf("g = %u is a generator of Z_%u \n", *g, *p);
  printf("Secret key: x = %u \n", *x);
  printf("h = g^x = %u\n", *h);
  printf("\n");
}

/* Encrypts m[0..Nints-1] in place; a[i] receives the per-element ephemeral
 * key g^y. NOTE(review): rand() (inside randXbitInt) is not thread-safe, so
 * the parallel loop below may produce correlated y values -- consider a
 * per-thread PRNG such as rand_r. */
void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int g, unsigned int h) {
  /* Q2.1 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nints; i++) {
    // pick y in Z_p randomly
    unsigned int y;
    do {
      y = randXbitInt(32) % p;
    } while (y == 0);  // dont allow y=0

    // compute a = g^y
    a[i] = modExp(g, y, p);
    // compute s = h^y
    unsigned int s = modExp(h, y, p);
    // encrypt m by multiplying with s
    m[i] = modprod(m[i], s, p);
  }
}

/* Decrypts m[0..Nints-1] in place using the secret key x and the ephemeral
 * keys in a[]. */
void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int x) {
  /* Q2.1 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nints; i++) {
    // compute s = a^x
    unsigned int s = modExp(a[i], x, p);
    // compute s^{-1} = s^{p-2} (Fermat's little theorem)
    unsigned int invS = modExp(s, p - 2, p);
    // decrypt message by multiplying by invS
    m[i] = modprod(m[i], invS, p);
  }
}

/* Pad the end of string with spaces so its length is divisible by
 * charsPerInt. Assumes enough allocated storage for the padded string.
 * BUGFIX: the original do-while always appended at least one full pad block,
 * even when the length was already a multiple of charsPerInt. */
void padString(unsigned char *string, unsigned int charsPerInt) {
  unsigned int length = strlen((char *) string);
  while (length % charsPerInt != 0) {
    string[length++] = ' ';
  }
  string[length] = '\0';
}

/* Maps one character to one integer: Z[i] = string[i].
 * Assumes Nints == Nchars (same convention as convertZToString) -- TODO
 * confirm with callers. */
void convertStringToZ(unsigned char *string, unsigned int Nchars,
                      unsigned int *Z, unsigned int Nints) {
  /* Q1.3 / Q2.2 */
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nchars; i++) {
    Z[i] = (unsigned int) string[i];
  }
}

/* Maps one integer back to one character: string[i] = Z[i].
 * BUGFIX: the original looped to sizeof(Z) -- the size of a POINTER (4 or 8),
 * not the element count -- so only the first few characters were written.
 * One int decodes to one char, so Nints is the correct bound (assumed equal
 * to Nchars, mirroring convertStringToZ). */
void convertZToString(unsigned int *Z, unsigned int Nints,
                      unsigned char *string, unsigned int Nchars) {
  /* Q1.4 / Q2.2 */
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nints; i++) {
    string[i] = (unsigned char) Z[i];
  }
}
inputSingle.c
// test case for single //It contains private(), firstprivate(), nowait //TODO copyprivate() #include <stdio.h> #ifdef _OPENMP #include "omp.h" #endif int y; int main() { int x; int z=100; #pragma omp parallel { #pragma omp single private(x,y) firstprivate(z) nowait { x = omp_get_thread_num(); y = omp_get_num_threads()+z; printf("I am thread %d out of %d threads\n", \ x, y); } } return 0; }
xmpp_scram_fmt_plug.c
/* * This software is Copyright (c) 2017, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * References, * * https://tools.ietf.org/html/rfc5802 * https://tools.ietf.org/html/rfc7677 * https://wiki.xmpp.org/web/SASLandSCRAM-SHA-1 * * Hash format -> $scram$0$iterations$salt-len$salt-in-hex$hash */ #if FMT_EXTERNS_H extern struct fmt_main fmt_xmpp_scram; #elif FMT_REGISTERS_H john_register_one(&fmt_xmpp_scram); #else #include <openssl/sha.h> #include <string.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "sha.h" #include "hmac_sha.h" #include "simd-intrinsics.h" #include "pbkdf2_hmac_sha1.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #if defined SIMD_COEF_32 #define SIMD_KEYS (SIMD_COEF_32 * SIMD_PARA_SHA1) #endif #define FORMAT_LABEL "xmpp-scram" #define FORMAT_NAME "" #define ALGORITHM_NAME "XMPP SCRAM PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #define PLAINTEXT_LENGTH 125 #define HASH_LENGTH 28 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define BINARY_SIZE 20 #define BINARY_ALIGN sizeof(uint32_t) #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #if !defined(SIMD_COEF_32) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #else #define MIN_KEYS_PER_CRYPT SIMD_KEYS #define MAX_KEYS_PER_CRYPT SIMD_KEYS #endif #define FORMAT_TAG "$xmpp-scram$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) static struct fmt_tests tests[] = { // hash generated by prosody-0.9.12 (taken from a .dat file) {"$xmpp-scram$0$4096$36$37333536663261622d613666622d346333642d396232622d626432646237633338343064$38f79a6e3e64c07f731570d531ec05365aa05306", "openwall123"}, // ejabberd-16.01 generated hash from "ejabberdctl dump output.txt" 
processed with ejabberd2john.py {"$xmpp-scram$0$4096$16$4f67aec1bd53f5f2f74652e69a3b8f32$4aec3caa8ace5180efa7a671092646c041ab1496", "qwerty"}, // ejabberd hash with a space in password {"$xmpp-scram$0$4096$16$1f7fcb384d5bcc61dfb1231ae1b32a2f$a2d076d56b0152ed557ad7d38fce93159bc63c9b", "password 123"}, {NULL} }; static struct custom_salt { uint32_t saltlen; uint32_t iterations; uint32_t type; unsigned char salt[64+1]; } *cur_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int res, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LENGTH; if ((p = strtokm(ctcopy, "$")) == NULL) /* internal type */ goto err; if (!isdec(p)) goto err; if (atoi(p) != 0) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* iterations */ goto err; if (!isdec(p)) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salten */ goto err; if (!isdec(p)) goto err; res = atoi(p); if (res > 64) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* hash */ goto err; if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *ctcopy, *keeptr, *p; int i; memset(&cs, 0, 
sizeof(cs)); ctcopy = strdup(ciphertext); keeptr = ctcopy;; ctcopy += FORMAT_TAG_LENGTH; p = strtokm(ctcopy, "$"); cs.type = atoi(p); p = strtokm(NULL, "$"); cs.iterations = atoi(p); p = strtokm(NULL, "$"); cs.saltlen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.saltlen; i++) { cs.salt[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE + 1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int crypt_all(int *pcount, struct db_salt *salt) { int index; const int count = *pcount; #ifdef _OPENMP #pragma omp parallel for #endif #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #if !defined (SIMD_COEF_32) unsigned char out[BINARY_SIZE]; SHA_CTX ctx; pbkdf2_sha1((unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, out, BINARY_SIZE, 0); hmac_sha1(out, BINARY_SIZE, (unsigned char*)"Client Key", 10, out, BINARY_SIZE); SHA1_Init(&ctx); SHA1_Update(&ctx, out, BINARY_SIZE); SHA1_Final((unsigned 
char*)crypt_out[index], &ctx); #else SHA_CTX ctx; int i; unsigned char *pin[SIMD_KEYS]; int lens[SIMD_KEYS]; unsigned char out_[SIMD_KEYS][BINARY_SIZE], *out[SIMD_KEYS]; for (i = 0; i < SIMD_KEYS; ++i) { pin[i] = (unsigned char*)saved_key[index+i]; lens[i] = strlen(saved_key[index+i]); out[i] = out_[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, out, BINARY_SIZE, 0); for (i = 0; i < SIMD_KEYS; ++i) { hmac_sha1(out[i], BINARY_SIZE, (unsigned char*)"Client Key", 10, out[i], BINARY_SIZE); SHA1_Init(&ctx); SHA1_Update(&ctx, out[i], BINARY_SIZE); SHA1_Final((unsigned char*)crypt_out[index+i], &ctx); } #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_xmpp_scram = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, 
NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
openmp1.c
#include <math.h> #include <omp.h> void cholesky(double *A, double *L, int n) { for (int j = 0; j < n; j++) { #pragma omp parallel for for (int i = j; i < n; i++) { double s = 0; for (int k = 0; k < j; k++) { s += L[i * n + k] * L[j * n + k]; } L[i * n + j] = (i == j) ? sqrt(A[i * n + i] - s) : (1.0 / L[j * n + j] * (A[i * n + j] - s)); } } }
1.race2.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i + 1][j - 1]; } // CHECK: Data Race detected // END
axpy_float_simdlen8.c
//axpy.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <math.h>  /* fabsf() for the correctness check */

#define N_RUNS 1000
#define N 1200

// read timer in second (millisecond resolution)
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

//Fill both vectors with random numbers in [0, 10)
void init(float *X, float *Y) {
    for (int i = 0; i<N; i++) {
        X[i] = (float)rand()/(float)(RAND_MAX/10.0);
        Y[i] = (float)rand()/(float)(RAND_MAX/10.0);
    }
}

//Y += a*X, with explicit 8-lane SIMD hint
void axpy(float *X, float *Y, float a) {
    #pragma omp simd simdlen(8)
    for (int i = 0; i<N; i++) {
        Y[i] += a * X[i];
    }
}

// Debug functions
//Scalar reference implementation of axpy
void axpy_serial(float *X, float *Y, float a) {
    for (int i = 0; i<N; i++) {
        Y[i] += a * X[i];
    }
}

//Prints only the first 8 elements (a peek, not the whole vector)
void print_vector(float *vector) {
    printf("[");
    for (int i = 0; i<8; i++) {
        printf("%.2f ", vector[i]);
    }
    puts("]");
}

/* Sum of elementwise differences between A and B.
 * BUGFIX: the original summed SIGNED differences, so opposite-sign errors
 * could cancel and a wrong result could still report ~0; accumulate
 * absolute differences instead. */
float check(float *A, float *B){
    float difference = 0;
    for(int i = 0;i<N; i++){
        difference += fabsf(A[i] - B[i]);
    }
    return difference;
}

int main(int argc, char **argv) {
    //Set everything up
    float *X = malloc(sizeof(float)*N);
    float *Y = malloc(sizeof(float)*N);
    float *Y_serial = malloc(sizeof(float)*N);

    float a = 3.14;

    srand(time(NULL));
    init(X, Y);
    for (int i = 0; i<N; i++) Y_serial[i] = Y[i];

    print_vector(Y);
    print_vector(X);
    printf("%.2f\n", a);
    puts("=\n");

    //Time the SIMD and the serial versions over N_RUNS repetitions
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy(X, Y, a);
    double t = (read_timer() - start);

    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy_serial(X, Y_serial, a);
    double t_serial = (read_timer() - start_serial);

    print_vector(Y);
    puts("---------------------------------");
    print_vector(Y_serial);

    /* BUGFIX: axpy performs 2*N flops per invocation (one multiply and one
     * add per element), so the total is 2*N*N_RUNS. The original formula
     * ((2.0 * N) * N * N_RUNS) overstated the flop count by a factor of N. */
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);

    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("AXPY (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("AXPY (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);

    printf("Correctness check: %f\n", check(Y,Y_serial));

    free(X);
    free(Y);
    free(Y_serial);
    return 0;
}
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/ASTFwd.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. 
SourceLocation EndLoc; /// Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. 
OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This structure contains most locations needed for by an OMPVarListClause. struct OMPVarListLocTy { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Location of '('. SourceLocation LParenLoc; /// Ending location of the clause. 
SourceLocation EndLoc; OMPVarListLocTy() = default; OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {} }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. 
void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. 
void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); } child_range children() { return child_range(&Allocator, &Allocator + 1); } const_child_range children() const { return const_child_range(&Allocator, &Allocator + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocator; } }; /// This represents clause 'allocate' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// and clause 'allocate' for the variable 'a'. class OMPAllocateClause final : public OMPVarListClause<OMPAllocateClause>, private llvm::TrailingObjects<OMPAllocateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Allocator specified in the clause, or 'nullptr' if the default one is /// used. 
Expr *Allocator = nullptr; /// Position of the ':' delimiter in the clause; SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc, LParenLoc, EndLoc, N), Allocator(Allocator), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPAllocateClause(unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. 
/// \param N The number of variables. static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. 
/// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. 
SourceLocation getNameModifierLoc() const { return NameModifierLoc; } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPIfClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_if; } }; /// This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } public: /// Build 'final' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPFinalClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPFinalClause() : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPFinalClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// Build an empty clause. OMPNumThreadsClause() : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } const_child_range children() const { return const_child_range(&NumThreads, &NumThreads + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_threads; } }; /// This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Safelen = nullptr; /// Set safelen. 
void setSafelen(Expr *Len) { Safelen = Len; } public: /// Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// Build an empty clause. explicit OMPSafelenClause() : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } child_range children() { return child_range(&Safelen, &Safelen + 1); } const_child_range children() const { return const_child_range(&Safelen, &Safelen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_safelen; } }; /// This represents 'simdlen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd simdlen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'simdlen' /// with single expression '4'. /// If the 'simdlen' clause is used then it specifies the preferred number of /// iterations to be executed concurrently. The parameter of the 'simdlen' /// clause must be a constant positive integer expression. class OMPSimdlenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Simdlen = nullptr; /// Set simdlen. 
void setSimdlen(Expr *Len) { Simdlen = Len; } public: /// Build 'simdlen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc), Simdlen(Len) {} /// Build an empty clause. explicit OMPSimdlenClause() : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); } child_range children() { return child_range(&Simdlen, &Simdlen + 1); } const_child_range children() const { return const_child_range(&Simdlen, &Simdlen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simdlen; } }; /// This represents the 'sizes' clause in the '#pragma omp tile' directive. /// /// \code /// #pragma omp tile sizes(5,5) /// for (int i = 0; i < 64; ++i) /// for (int j = 0; j < 64; ++j) /// \endcode class OMPSizesClause final : public OMPClause, private llvm::TrailingObjects<OMPSizesClause, Expr *> { friend class OMPClauseReader; friend class llvm::TrailingObjects<OMPSizesClause, Expr *>; /// Location of '('. SourceLocation LParenLoc; /// Number of tile sizes in the clause. unsigned NumSizes; /// Build an empty clause. 
explicit OMPSizesClause(int NumSizes) : OMPClause(llvm::omp::OMPC_sizes, SourceLocation(), SourceLocation()), NumSizes(NumSizes) {} public: /// Build a 'sizes' AST node. /// /// \param C Context of the AST. /// \param StartLoc Location of the 'sizes' identifier. /// \param LParenLoc Location of '('. /// \param EndLoc Location of ')'. /// \param Sizes Content of the clause. static OMPSizesClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Sizes); /// Build an empty 'sizes' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumSizes Number of items in the clause. static OMPSizesClause *CreateEmpty(const ASTContext &C, unsigned NumSizes); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the number of list items. unsigned getNumSizes() const { return NumSizes; } /// Returns the tile size expressions. MutableArrayRef<Expr *> getSizesRefs() { return MutableArrayRef<Expr *>(static_cast<OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>(), NumSizes); } ArrayRef<Expr *> getSizesRefs() const { return ArrayRef<Expr *>(static_cast<const OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>(), NumSizes); } /// Sets the tile size expressions. 
void setSizesRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumSizes); std::copy(VL.begin(), VL.end(), static_cast<OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>()); } child_range children() { MutableArrayRef<Expr *> Sizes = getSizesRefs(); return child_range(reinterpret_cast<Stmt **>(Sizes.begin()), reinterpret_cast<Stmt **>(Sizes.end())); } const_child_range children() const { ArrayRef<Expr *> Sizes = getSizesRefs(); return const_child_range(reinterpret_cast<Stmt *const *>(Sizes.begin()), reinterpret_cast<Stmt *const *>(Sizes.end())); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_sizes; } }; /// This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. /// /// \param K Argument of clause. void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). 
/// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::DefaultKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. 
llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPProcBindClause() : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::ProcBindKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. 
SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. 
/// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedSharedMemoryClause() : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPReverseOffloadClause() : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reverse_offload; } }; /// This represents 'dynamic_allocators' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires dynamic_allocators /// \endcode /// In this example directive '#pragma omp requires' has 'dynamic_allocators' /// clause. class OMPDynamicAllocatorsClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'dynamic_allocators' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPDynamicAllocatorsClause() : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators; } }; /// This represents 'atomic_default_mem_order' clause in the '#pragma omp /// requires' directive. /// /// \code /// #pragma omp requires atomic_default_mem_order(seq_cst) /// \endcode /// In this example directive '#pragma omp requires' has simple /// atomic_default_mem_order' clause with kind 'seq_cst'. class OMPAtomicDefaultMemOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '(' SourceLocation LParenLoc; /// A kind of the 'atomic_default_mem_order' clause. OpenMPAtomicDefaultMemOrderClauseKind Kind = OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst', /// 'acq_rel' or 'relaxed'). /// /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPAtomicDefaultMemOrderClause() : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the locaiton of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order; } }; /// This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown; /// Modifiers for 'schedule' clause. 
enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set schedule modifier (fills the first unknown slot, then the second;
  /// asserts if both modifier slots are already set).
  ///
  /// \param M Schedule modifier location.
  ///
  /// NOTE(review): the name is misspelled ("Modifer"); kept as-is because
  /// callers elsewhere in the project use this exact spelling.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier
  /// \param M2 The second modifier applied to 'schedule' clause.
  /// \param M2Loc Location of the second modifier
  OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation KLoc, SourceLocation CommaLoc,
                    SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
                    Expr *ChunkSize, Stmt *HelperChunkSize,
                    OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
                    OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
      : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
    Modifiers[FIRST] = M1;
    Modifiers[SECOND] = M2;
    ModifiersLoc[FIRST] = M1Loc;
    ModifiersLoc[SECOND] = M2Loc;
  }

  /// Build an empty clause.
  explicit OMPScheduleClause()
      : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {
    Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
    Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
  }

  /// Get kind of the clause.
  OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }

  /// Get the first modifier of the clause.
  OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
    return Modifiers[FIRST];
  }

  /// Get the second modifier of the clause.
  OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
    return Modifiers[SECOND];
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getScheduleKindLoc() { return KindLoc; }

  /// Get the first modifier location.
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }

  /// Get the second modifier location.
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_schedule;
  }
};

/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Real number of loops.
  unsigned NumberOfLoops = 0;

  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc),
        LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {}

  /// Build an empty clause.
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                  unsigned NumLoops, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);

  /// Build an empty clause.
  static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  /// Set number of iterations for the specified loop.
  void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
  /// Get number of iterations for all the loops.
  ArrayRef<Expr *> getLoopNumIterations() const;

  /// Set loop counter for the specified loop.
  void setLoopCounter(unsigned NumLoop, Expr *Counter);
  /// Get loop counter for the specified loop.
  Expr *getLoopCounter(unsigned NumLoop);
  const Expr *getLoopCounter(unsigned NumLoop) const;

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_ordered;
  }
};

/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nowait;
  }
};

/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUntiedClause()
      : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_untied;
  }
};

/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPMergeableClause()
      : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_mergeable;
  }
};

/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReadClause()
      : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_read;
  }
};

/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPWriteClause()
      : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_write;
  }
};

/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
/// Also, this class represents 'update' clause in '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) update(in)
/// \endcode
/// In this example directive '#pragma omp depobj' has 'update' clause with
/// 'in' dependence kind.
class OMPUpdateClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
                                    OpenMPDependClauseKind> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// true if extended version of the clause for 'depobj' directive.
  bool IsExtended = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
    // 2 locations: for '(' and argument location.
    return IsExtended ? 2 : 0;
  }

  /// Sets the location of '(' in clause for 'depobj' directive.
  void setLParenLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<SourceLocation>() = Loc;
  }

  /// Sets the location of the argument in clause for 'depobj' directive.
  void setArgumentLoc(SourceLocation Loc) {
    assert(IsExtended && "Expected extended clause.");
    *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
  }

  /// Sets the dependence kind for the clause for 'depobj' directive.
  void setDependencyKind(OpenMPDependClauseKind DK) {
    assert(IsExtended && "Expected extended clause.");
    *getTrailingObjects<OpenMPDependClauseKind>() = DK;
  }

  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
                  bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
        IsExtended(IsExtended) {}

  /// Build an empty clause.
  OMPUpdateClause(bool IsExtended)
      : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
        IsExtended(IsExtended) {}

public:
  /// Creates clause for 'atomic' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc);

  /// Creates clause for 'depobj' directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ArgumentLoc Location of the argument.
  /// \param DK Dependence kind.
  /// \param EndLoc Ending location of the clause.
  static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation ArgumentLoc,
                                 OpenMPDependClauseKind DK,
                                 SourceLocation EndLoc);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param IsExtended true if extended clause for 'depobj' directive must be
  /// created.
  static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);

  /// Checks if the clause is the extended clause for 'depobj' directive.
  bool isExtended() const { return IsExtended; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  /// Gets the location of '(' in clause for 'depobj' directive.
  SourceLocation getLParenLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<SourceLocation>();
  }

  /// Gets the location of argument in clause for 'depobj' directive.
  SourceLocation getArgumentLoc() const {
    assert(IsExtended && "Expected extended clause.");
    return *std::next(getTrailingObjects<SourceLocation>(), 1);
  }

  /// Gets the dependence kind in clause for 'depobj' directive.
  OpenMPDependClauseKind getDependencyKind() const {
    assert(IsExtended && "Expected extended clause.");
    return *getTrailingObjects<OpenMPDependClauseKind>();
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
OMPCaptureClause()
      : OMPClause(llvm::omp::OMPC_capture, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_capture;
  }
};

/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
  }
};

/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
  /// Build 'acq_rel' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcqRelClause()
      : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
  }
};

/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
  /// Build 'acquire' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPAcquireClause()
      : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_acquire;
  }
};

/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
  /// Build 'release' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReleaseClause()
      : OMPClause(llvm::omp::OMPC_release, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_release;
  }
};

/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
  /// Build 'relaxed' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPRelaxedClause()
      : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_relaxed;
  }
};

/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
                                           LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
                                           SourceLocation(), SourceLocation(),
                                           SourceLocation(), N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate,
                                                StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new
  /// private variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable
  /// is of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
  }
};

/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment
  // operation required for lastprivate clause. This list represents private
  // variables (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment
  // operation required for lastprivate clause. This list represents original
  // variables (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
SourceLocation LPKindLoc; /// Optional colon location, if specified by user. SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, unsigned N) : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPLastprivateClause(unsigned N) : OMPVarListClause<OMPLastprivateClause>( llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Get the list of helper expressions for initialization of private /// copies for lastprivate variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. 
These expressions represent original variables (for arrays, single /// array element) in the final assignment statement performed by the /// lastprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign private copy of the variable to original variable. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } /// Sets lastprivate kind. void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; } /// Sets location of the lastprivate kind. void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; } /// Sets colon symbol location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. 
This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// \param LPKind Lastprivate kind, e.g. 'conditional'. /// \param LPKindLoc Location of the lastprivate kind. /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); /// Lastprivate kind. OpenMPLastprivateModifier getKind() const { return LPKind; } /// Returns the location of the lastprivate kind. SourceLocation getKindLoc() const { return LPKindLoc; } /// Returns the location of the ':' symbol, if any. SourceLocation getColonLoc() const { return ColonLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; /// Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. 
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLastprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_lastprivate; } }; /// This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. 
class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPSharedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_shared; } }; /// This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. class OMPReductionClause final : public OMPVarListClause<OMPReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Reduction modifier. OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown; /// Reduction modifier location. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. 
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets reduction modifier. void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; } /// Sets location of the modifier. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private copy of the reduction /// variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. 
void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper copy operations for inscan reductions. /// The form is: Temps[i] = LHS[i]; void setInscanCopyOps(ArrayRef<Expr *> Ops); /// Get the list of helper inscan copy operations. 
MutableArrayRef<Expr *> getInscanCopyOps() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyOps() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } /// Set list of helper temp vars for inscan copy array operations. void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps); /// Get the list of helper inscan copy temps. MutableArrayRef<Expr *> getInscanCopyArrayTemps() { return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyArrayTemps() const { return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size()); } /// Set list of helper temp elements vars for inscan copy array operations. void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems); /// Get the list of helper inscan copy temps. MutableArrayRef<Expr *> getInscanCopyArrayElems() { return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyArrayElems() const { return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. 
/// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param CopyOps List of copy operations for inscan reductions: /// \code /// TempExprs = LHSExprs; /// \endcode /// \param CopyArrayTemps Temp arrays for prefix sums. /// \param CopyArrayElems Temp arrays for prefix sums. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps, ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// \param Modifier Reduction modifier. static OMPReductionClause * CreateEmpty(const ASTContext &C, unsigned N, OpenMPReductionClauseModifier Modifier); /// Returns modifier. 
OpenMPReductionClauseModifier getModifier() const { return Modifier; } /// Returns modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range copy_ops() const { return helper_expr_const_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_range copy_ops() { return helper_expr_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_const_range copy_array_temps() 
const { return helper_expr_const_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); } helper_expr_range copy_array_temps() { return helper_expr_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); } helper_expr_const_range copy_array_elems() const { return helper_expr_const_range(getInscanCopyArrayElems().begin(), getInscanCopyArrayElems().end()); } helper_expr_range copy_array_elems() { return helper_expr_range(getInscanCopyArrayElems().begin(), getInscanCopyArrayElems().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPReductionClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reduction; } }; /// This represents clause 'task_reduction' in the '#pragma omp taskgroup' /// directives. /// /// \code /// #pragma omp taskgroup task_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp taskgroup' has clause /// 'task_reduction' with operator '+' and the variables 'a' and 'b'. class OMPTaskReductionClause final : public OMPVarListClause<OMPTaskReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. 
DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPTaskReductionClause>( llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPTaskReductionClause(unsigned N) : OMPVarListClause<OMPTaskReductionClause>( llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. 
/// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. 
/// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CustomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPTaskReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. 
SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPTaskReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), 
const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_task_reduction; } }; /// This represents clause 'in_reduction' in the '#pragma omp task' directives. /// /// \code /// #pragma omp task in_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp task' has clause 'in_reduction' with /// operator '+' and the variables 'a' and 'b'. class OMPInReductionClause final : public OMPVarListClause<OMPInReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPInReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInReductionClause(unsigned N) : OMPVarListClause<OMPInReductionClause>( llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. 
void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. 
/// These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper reduction taskgroup descriptors. void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction taskgroup descriptors. MutableArrayRef<Expr *> getTaskgroupDescriptors() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getTaskgroupDescriptors() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. 
/// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CustomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param TaskgroupDescriptors List of helper taskgroup descriptors for /// corresponding items in parent taskgroup task_reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPInReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range taskgroup_descriptors() const { return helper_expr_const_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } helper_expr_range taskgroup_descriptors() { return helper_expr_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range 
used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_in_reduction; } }; /// This represents clause 'linear' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. class OMPLinearClause final : public OMPVarListClause<OMPLinearClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLinearClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Modifier of 'linear' clause. OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val; /// Location of linear modifier if any. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// Sets the expression to calculate linear step for clause. void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. 
explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), OMPClauseWithPostUpdate(this) {} /// Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[]; /// Finals[]; Step; CalcStep; } MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Sets the list of update expressions for linear variables. MutableArrayRef<Expr *> getUpdates() { return MutableArrayRef<Expr *>(getInits().end(), varlist_size()); } ArrayRef<const Expr *> getUpdates() const { return llvm::makeArrayRef(getInits().end(), varlist_size()); } /// Sets the list of final update expressions for linear variables. MutableArrayRef<Expr *> getFinals() { return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size()); } ArrayRef<const Expr *> getFinals() const { return llvm::makeArrayRef(getUpdates().end(), varlist_size()); } /// Gets the list of used expressions for linear variables. 
/// NOTE(review): the "+ 2" skips the Step and CalcStep helper slots stored /// after Finals (see the trailing-storage layout comment above); the list is /// sized varlist_size() + 1 — presumably one extra used-expression slot /// beyond the per-variable entries; confirm against setUsedExprs callers. MutableArrayRef<Expr *> getUsedExprs() { return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1); } ArrayRef<const Expr *> getUsedExprs() const { return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1); } /// Sets the list of the copies of original linear variables. /// \param PL List of expressions. void setPrivates(ArrayRef<Expr *> PL); /// Sets the list of the initial values for linear variables. /// \param IL List of expressions. void setInits(ArrayRef<Expr *> IL); public: /// Creates clause with a list of variables \a VL and a linear step /// \a Step. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Modifier Modifier of 'linear' clause. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PL List of private copies of original variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Set modifier. 
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; } /// Return modifier. OpenMPLinearClauseKind getModifier() const { return Modifier; } /// Set modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Return modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// Returns expression to calculate linear step. Expr *getCalcStep() { return *(getFinals().end() + 1); } /// Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// Sets the list of update expressions for linear variables. /// \param UL List of expressions. void setUpdates(ArrayRef<Expr *> UL); /// Sets the list of final update expressions for linear variables. /// \param FL List of expressions. void setFinals(ArrayRef<Expr *> FL); /// Sets the list of used expressions for the linear clause. 
void setUsedExprs(ArrayRef<Expr *> UE); using privates_iterator = MutableArrayRef<Expr *>::iterator; using privates_const_iterator = ArrayRef<const Expr *>::iterator; using privates_range = llvm::iterator_range<privates_iterator>; using privates_const_range = llvm::iterator_range<privates_const_iterator>; privates_range privates() { return privates_range(getPrivates().begin(), getPrivates().end()); } privates_const_range privates() const { return privates_const_range(getPrivates().begin(), getPrivates().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } using updates_iterator = MutableArrayRef<Expr *>::iterator; using updates_const_iterator = ArrayRef<const Expr *>::iterator; using updates_range = llvm::iterator_range<updates_iterator>; using updates_const_range = llvm::iterator_range<updates_const_iterator>; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } using finals_iterator = MutableArrayRef<Expr *>::iterator; using finals_const_iterator = ArrayRef<const Expr *>::iterator; using finals_range = llvm::iterator_range<finals_iterator>; using finals_const_range = llvm::iterator_range<finals_const_iterator>; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } using used_expressions_iterator = MutableArrayRef<Expr *>::iterator; using used_expressions_const_iterator = ArrayRef<const Expr 
*>::iterator; using used_expressions_range = llvm::iterator_range<used_expressions_iterator>; using used_expressions_const_range = llvm::iterator_range<used_expressions_const_iterator>; used_expressions_range used_expressions() { return finals_range(getUsedExprs().begin(), getUsedExprs().end()); } used_expressions_const_range used_expressions() const { return finals_const_range(getUsedExprs().begin(), getUsedExprs().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLinearClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPLinearClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_linear; } }; /// This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. class OMPAlignedClause final : public OMPVarListClause<OMPAlignedClause>, private llvm::TrailingObjects<OMPAlignedClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. 
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars) {} public: /// Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// Returns alignment. 
const Expr *getAlignment() const { return *varlist_end(); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAlignedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_aligned; } }; /// This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. class OMPCopyinClause final : public OMPVarListClause<OMPCopyinClause>, private llvm::TrailingObjects<OMPCopyinClause, Expr *> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPCopyinClause(unsigned N) : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyin clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyin clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. 
MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of propagation of master's thread values of /// threadprivate variables to local instances of that variables in other /// implicit threads. static OMPCopyinClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N); using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPCopyinClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_copyin; } }; /// This represents clause 'copyprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp single copyprivate(a,b) /// \endcode /// In this example directive '#pragma omp single' has clause 'copyprivate' /// with the variables 'a' and 'b'. 
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  // NOTE(review): trailing storage layout — the variable list
  // (varlist_size() entries) is followed by three further Expr* arrays of the
  // same length: source exprs, then destination exprs, then assignment ops.
  // This is why each getter below starts at the previous array's end().

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
                                               StartLoc, LParenLoc, EndLoc, N) {
  }

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive.
/// This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
                                         LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
                                         SourceLocation(), SourceLocation(),
                                         SourceLocation(), N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_flush;
  }
};

/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Depobj expression associated with the clause.
  Expr *Depobj = nullptr;

  /// Build clause with the given source locations.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
        LParenLoc(LParenLoc) {}

  /// Build an empty clause.
  explicit OMPDepobjClause()
      : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(), SourceLocation()) {
  }

  /// Sets the depobj expression.
  void setDepobj(Expr *E) { Depobj = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Creates clause.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Depobj depobj expression associated with the 'depobj' directive.
  static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *Depobj);

  /// Creates an empty clause.
  ///
  /// \param C AST context.
  static OMPDepobjClause *CreateEmpty(const ASTContext &C);

  /// Returns depobj expression associated with the clause.
  Expr *getDepobj() { return Depobj; }
  const Expr *getDepobj() const { return Depobj; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&Depobj),
                       reinterpret_cast<Stmt **>(&Depobj) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDepobjClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depobj;
  }
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc,
                                          LParenLoc, EndLoc, N),
        NumLoops(NumLoops) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  explicit OMPDependClause(unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend,
                                          SourceLocation(), SourceLocation(),
                                          SourceLocation(), N),
        NumLoops(NumLoops) {}

  /// Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// Set dependency type location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Sets optional dependency modifier.
  void setModifier(Expr *DepModifier);

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *DepModifier,
                                 OpenMPDependClauseKind DepKind,
                                 SourceLocation DepLoc, SourceLocation ColonLoc,
                                 ArrayRef<Expr *> VL, unsigned NumLoops);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                      unsigned NumLoops);

  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// Return optional depend modifier.
  Expr *getModifier();
  const Expr *getModifier() const {
    return const_cast<OMPDependClause *>(this)->getModifier();
  }

  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Get number of loops associated with the clause.
  unsigned getNumLoops() const { return NumLoops; }

  /// Set the loop data for the depend clauses with 'sink|source' kind of
  /// dependency.
  void setLoopData(unsigned NumLoop, Expr *Cnt);

  /// Get the loop data.
  Expr *getLoopData(unsigned NumLoop);
  const Expr *getLoopData(unsigned NumLoop) const;

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDependClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depend;
  }
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device clause modifier.
  OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

  /// Sets modifier.
  void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }

  /// Sets modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

public:
  /// Build 'device' clause.
  ///
  /// \param Modifier Clause modifier.
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper statement registered as the clause's pre-init
  /// statement (see \a setPreInitStmt).
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param ModifierLoc Modifier location.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
                  OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation ModifierLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
        ModifierLoc(ModifierLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  Expr *getDevice() const { return cast<Expr>(Device); }

  /// Gets modifier.
  OpenMPDeviceClauseModifier getModifier() const { return Modifier; }

  /// Gets modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }

  child_range children() { return child_range(&Device, &Device + 1); }

  const_child_range children() const {
    return const_child_range(&Device, &Device + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_device;
  }
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads'
/// clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) {
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause()
      : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Pair of Expression and Non-contiguous pair associated with the
    /// component.
    llvm::PointerIntPair<Expr *, 1, bool> AssociatedExpressionNonContiguousPr;

    /// Declaration associated with the component. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration,
                               bool IsNonContiguous)
        : AssociatedExpressionNonContiguousPr(AssociatedExpression,
                                              IsNonContiguous),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const {
      return AssociatedExpressionNonContiguousPr.getPointer();
    }

    bool isNonContiguous() const {
      return AssociatedExpressionNonContiguousPr.getInt();
    }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;

  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;

  /// Number of component lists.
unsigned NumComponentLists; /// Total number of expression components. unsigned NumComponents; OMPMappableExprListSizeTy() = default; OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations), NumComponentLists(NumComponentLists), NumComponents(NumComponents) {} }; /// This represents clauses with a list of expressions that are mappable. /// Examples of these clauses are 'map' in /// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from /// in '#pragma omp target update...' directives. template <class T> class OMPMappableExprListClause : public OMPVarListClause<T>, public OMPClauseMappableExprCommon { friend class OMPClauseReader; /// Number of unique declarations in this clause. unsigned NumUniqueDeclarations; /// Number of component lists in this clause. unsigned NumComponentLists; /// Total number of components in this clause. unsigned NumComponents; /// Whether this clause is possible to have user-defined mappers associated. /// It should be true for map, to, and from clauses, and false for /// use_device_ptr and is_device_ptr. const bool SupportsMapper; /// C++ nested name specifier for the associated user-defined mapper. NestedNameSpecifierLoc MapperQualifierLoc; /// The associated user-defined mapper identifier information. DeclarationNameInfo MapperIdInfo; protected: /// Build a clause for \a NumUniqueDeclarations declarations, \a /// NumComponentLists total component lists, and \a NumComponents total /// components. /// /// \param K Kind of the clause. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. /// \param SupportsMapper Indicates whether this clause is possible to have /// user-defined mappers associated. /// \param MapperQualifierLocPtr C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfoPtr The identifier of associated user-defined mapper. OMPMappableExprListClause( OpenMPClauseKind K, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false, NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr, DeclarationNameInfo *MapperIdInfoPtr = nullptr) : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc, Sizes.NumVars), NumUniqueDeclarations(Sizes.NumUniqueDeclarations), NumComponentLists(Sizes.NumComponentLists), NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) { if (MapperQualifierLocPtr) MapperQualifierLoc = *MapperQualifierLocPtr; if (MapperIdInfoPtr) MapperIdInfo = *MapperIdInfoPtr; } /// Get the unique declarations that are in the trailing objects of the /// class. MutableArrayRef<ValueDecl *> getUniqueDeclsRef() { return MutableArrayRef<ValueDecl *>( static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Get the unique declarations that are in the trailing objects of the /// class. ArrayRef<ValueDecl *> getUniqueDeclsRef() const { return ArrayRef<ValueDecl *>( static_cast<const T *>(this) ->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Set the unique declarations that are in the trailing objects of the /// class. 
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Set the number of lists per declaration that are in the trailing /// objects of the class. void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Set the cumulative component lists sizes that are in the trailing /// objects of the class. 
void setComponentListSizes(ArrayRef<unsigned> CLSs) { assert(CLSs.size() == NumComponentLists && "Unexpected amount of component lists."); std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin()); } /// Get the components that are in the trailing objects of the class. MutableArrayRef<MappableComponent> getComponentsRef() { return MutableArrayRef<MappableComponent>( static_cast<T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Get the components that are in the trailing objects of the class. ArrayRef<MappableComponent> getComponentsRef() const { return ArrayRef<MappableComponent>( static_cast<const T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Set the components that are in the trailing objects of the class. /// This requires the list sizes so that it can also fill the original /// expressions, which are the first component of each list. void setComponents(ArrayRef<MappableComponent> Components, ArrayRef<unsigned> CLSs) { assert(Components.size() == NumComponents && "Unexpected amount of component lists."); assert(CLSs.size() == NumComponentLists && "Unexpected amount of list sizes."); std::copy(Components.begin(), Components.end(), getComponentsRef().begin()); } /// Fill the clause information from the list of declarations and /// associated component lists. void setClauseInfo(ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists) { // Perform some checks to make sure the data sizes are consistent with the // information available when the clause was created. 
assert(getUniqueDeclarationsTotalNumber(Declarations) == NumUniqueDeclarations && "Unexpected number of mappable expression info entries!"); assert(getComponentsTotalNumber(ComponentLists) == NumComponents && "Unexpected total number of components!"); assert(Declarations.size() == ComponentLists.size() && "Declaration and component lists size is not consistent!"); assert(Declarations.size() == NumComponentLists && "Unexpected declaration and component lists size!"); // Organize the components by declaration and retrieve the original // expression. Original expressions are always the first component of the // mappable component list. llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>> ComponentListMap; { auto CI = ComponentLists.begin(); for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE; ++DI, ++CI) { assert(!CI->empty() && "Invalid component list!"); ComponentListMap[*DI].push_back(*CI); } } // Iterators of the target storage. auto UniqueDeclarations = getUniqueDeclsRef(); auto UDI = UniqueDeclarations.begin(); auto DeclNumLists = getDeclNumListsRef(); auto DNLI = DeclNumLists.begin(); auto ComponentListSizes = getComponentListSizesRef(); auto CLSI = ComponentListSizes.begin(); auto Components = getComponentsRef(); auto CI = Components.begin(); // Variable to compute the accumulation of the number of components. unsigned PrevSize = 0u; // Scan all the declarations and associated component lists. for (auto &M : ComponentListMap) { // The declaration. auto *D = M.first; // The component lists. auto CL = M.second; // Initialize the entry. *UDI = D; ++UDI; *DNLI = CL.size(); ++DNLI; // Obtain the cumulative sizes and concatenate all the components in the // reserved storage. for (auto C : CL) { // Accumulate with the previous size. PrevSize += C.size(); // Save the size. *CLSI = PrevSize; ++CLSI; // Append components after the current components iterator. 
CI = std::copy(C.begin(), C.end(), CI); } } } /// Set the nested name specifier of associated user-defined mapper. void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) { MapperQualifierLoc = NNSL; } /// Set the name of associated user-defined mapper. void setMapperIdInfo(DeclarationNameInfo MapperId) { MapperIdInfo = MapperId; } /// Get the user-defined mapper references that are in the trailing objects of /// the class. MutableArrayRef<Expr *> getUDMapperRefs() { assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); return llvm::makeMutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Get the user-defined mappers references that are in the trailing objects /// of the class. ArrayRef<Expr *> getUDMapperRefs() const { assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); return llvm::makeArrayRef<Expr *>( static_cast<const T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Set the user-defined mappers that are in the trailing objects of the /// class. void setUDMapperRefs(ArrayRef<Expr *> DMDs) { assert(DMDs.size() == OMPVarListClause<T>::varlist_size() && "Unexpected number of user-defined mappers."); assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin()); } public: /// Return the number of unique base declarations in this clause. unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; } /// Return the number of lists derived from the clause expressions. unsigned getTotalComponentListNum() const { return NumComponentLists; } /// Return the total number of components in all lists derived from the /// clause. 
unsigned getTotalComponentsNum() const { return NumComponents; }

/// Gets the nested name specifier for associated user-defined mapper.
NestedNameSpecifierLoc getMapperQualifierLoc() const {
  return MapperQualifierLoc;
}

/// Gets the name info for associated user-defined mapper.
const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }

/// Iterator that browses the components by lists. It also allows
/// browsing components of a single declaration.
/// Dereferencing yields (declaration, component list, mapper decl) tuples;
/// list boundaries are recovered from the cumulative list-size array.
class const_component_lists_iterator
    : public llvm::iterator_adaptor_base<
          const_component_lists_iterator,
          MappableExprComponentListRef::const_iterator,
          std::forward_iterator_tag, MappableComponent, ptrdiff_t,
          MappableComponent, MappableComponent> {
  // The declaration the iterator currently refers to.
  ArrayRef<ValueDecl *>::iterator DeclCur;

  // The list number associated with the current declaration.
  ArrayRef<unsigned>::iterator NumListsCur;

  // Whether this clause is possible to have user-defined mappers associated.
  const bool SupportsMapper;

  // The user-defined mapper associated with the current declaration.
  ArrayRef<Expr *>::iterator MapperCur;

  // Remaining lists for the current declaration.
  unsigned RemainingLists = 0;

  // The cumulative size of the previous list, or zero if there is no previous
  // list.
  unsigned PrevListSize = 0;

  // The cumulative sizes of the current list - it will delimit the remaining
  // range of interest.
  ArrayRef<unsigned>::const_iterator ListSizeCur;
  ArrayRef<unsigned>::const_iterator ListSizeEnd;

  // Iterator to the end of the components storage.
  MappableExprComponentListRef::const_iterator End;

public:
  /// Construct an iterator that scans all lists.
  explicit const_component_lists_iterator(
      ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
      ArrayRef<unsigned> CumulativeListSizes,
      MappableExprComponentListRef Components, bool SupportsMapper,
      ArrayRef<Expr *> Mappers)
      : const_component_lists_iterator::iterator_adaptor_base(
            Components.begin()),
        DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
        SupportsMapper(SupportsMapper),
        ListSizeCur(CumulativeListSizes.begin()),
        ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
    assert(UniqueDecls.size() == DeclsListNum.size() &&
           "Inconsistent number of declarations and list sizes!");
    if (!DeclsListNum.empty())
      RemainingLists = *NumListsCur;
    if (SupportsMapper)
      MapperCur = Mappers.begin();
  }

  /// Construct an iterator that scans lists for a given declaration \a
  /// Declaration.
  explicit const_component_lists_iterator(
      const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
      ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
      MappableExprComponentListRef Components, bool SupportsMapper,
      ArrayRef<Expr *> Mappers)
      : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                       CumulativeListSizes, Components,
                                       SupportsMapper, Mappers) {
    // Look for the desired declaration. While we are looking for it, we
    // update the state so that we know the component where a given list
    // starts.
    for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
      if (*DeclCur == Declaration)
        break;

      assert(*NumListsCur > 0 && "No lists associated with declaration??");

      // Skip the lists associated with the current declaration, but save the
      // last list size that was skipped.
      std::advance(ListSizeCur, *NumListsCur - 1);
      PrevListSize = *ListSizeCur;
      ++ListSizeCur;

      if (SupportsMapper)
        ++MapperCur;
    }

    // If we didn't find any declaration, advance the iterator to after the
    // last component and set remaining lists to zero.
    if (ListSizeCur == CumulativeListSizes.end()) {
      this->I = End;
      RemainingLists = 0u;
      return;
    }

    // Set the remaining lists with the total number of lists of the current
    // declaration.
    RemainingLists = *NumListsCur;

    // Adjust the list size end iterator to the end of the relevant range.
    ListSizeEnd = ListSizeCur;
    std::advance(ListSizeEnd, RemainingLists);

    // Given that the list sizes are cumulative, the index of the component
    // that starts the list is the size of the previous list.
    std::advance(this->I, PrevListSize);
  }

  // Return the array with the current list. The sizes are cumulative, so the
  // array size is the difference between the current size and previous one.
  std::tuple<const ValueDecl *, MappableExprComponentListRef,
             const ValueDecl *>
  operator*() const {
    assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
    const ValueDecl *Mapper = nullptr;
    if (SupportsMapper && *MapperCur)
      Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl());
    return std::make_tuple(
        *DeclCur,
        MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize),
        Mapper);
  }
  std::tuple<const ValueDecl *, MappableExprComponentListRef,
             const ValueDecl *>
  operator->() const {
    return **this;
  }

  // Skip the components of the current list.
  const_component_lists_iterator &operator++() {
    assert(ListSizeCur != ListSizeEnd && RemainingLists &&
           "Invalid iterator!");

    // If we don't have more lists just skip all the components. Otherwise,
    // advance the iterator by the number of components in the current list.
    if (std::next(ListSizeCur) == ListSizeEnd) {
      this->I = End;
      RemainingLists = 0;
    } else {
      std::advance(this->I, *ListSizeCur - PrevListSize);
      PrevListSize = *ListSizeCur;

      // We are done with a declaration, move to the next one.
      if (!(--RemainingLists)) {
        ++DeclCur;
        ++NumListsCur;
        RemainingLists = *NumListsCur;
        assert(RemainingLists && "No lists in the following declaration??");
      }
    }

    ++ListSizeCur;
    if (SupportsMapper)
      ++MapperCur;

    return *this;
  }
};

using const_component_lists_range =
    llvm::iterator_range<const_component_lists_iterator>;

/// Iterators for all component lists.
const_component_lists_iterator component_lists_begin() const {
  return const_component_lists_iterator(
      getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
      getComponentsRef(), SupportsMapper,
      SupportsMapper ? getUDMapperRefs() : llvm::None);
}
const_component_lists_iterator component_lists_end() const {
  return const_component_lists_iterator(
      ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
      MappableExprComponentListRef(getComponentsRef().end(),
                                   getComponentsRef().end()),
      SupportsMapper, llvm::None);
}
const_component_lists_range component_lists() const {
  return {component_lists_begin(), component_lists_end()};
}

/// Iterators for component lists associated with the provided
/// declaration.
const_component_lists_iterator
decl_component_lists_begin(const ValueDecl *VD) const {
  return const_component_lists_iterator(
      VD, getUniqueDeclsRef(), getDeclNumListsRef(),
      getComponentListSizesRef(), getComponentsRef(), SupportsMapper,
      SupportsMapper ? getUDMapperRefs() : llvm::None);
}
const_component_lists_iterator decl_component_lists_end() const {
  return component_lists_end();
}
const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
  return {decl_component_lists_begin(VD), decl_component_lists_end()};
}

/// Iterators to access all the declarations, number of lists, list sizes, and
/// components.
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

/// Range over all unique base declarations referenced by the clause.
const_all_decls_range all_decls() const {
  auto A = getUniqueDeclsRef();
  return const_all_decls_range(A.begin(), A.end());
}

using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
using const_all_num_lists_range =
    llvm::iterator_range<const_all_num_lists_iterator>;

/// Range over the number of component lists per declaration.
const_all_num_lists_range all_num_lists() const {
  auto A = getDeclNumListsRef();
  return const_all_num_lists_range(A.begin(), A.end());
}

using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
using const_all_lists_sizes_range =
    llvm::iterator_range<const_all_lists_sizes_iterator>;

/// Range over the cumulative component-list sizes.
const_all_lists_sizes_range all_lists_sizes() const {
  auto A = getComponentListSizesRef();
  return const_all_lists_sizes_range(A.begin(), A.end());
}

using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
using const_all_components_range =
    llvm::iterator_range<const_all_components_iterator>;

/// Range over all mappable expression components in the clause.
const_all_components_range all_components() const {
  auto A = getComponentsRef();
  return const_all_components_range(A.begin(), A.end());
}

using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
using mapperlist_const_range =
    llvm::iterator_range<mapperlist_const_iterator>;

mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
mapperlist_const_iterator mapperlist_begin() const {
  return getUDMapperRefs().begin();
}
mapperlist_const_iterator mapperlist_end() const {
  return getUDMapperRefs().end();
}
mapperlist_range mapperlists() {
  return mapperlist_range(mapperlist_begin(), mapperlist_end());
}
mapperlist_const_range mapperlists() const {
  return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
}
};

/// This
/// represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // Number-of-lists array followed by the cumulative list-sizes array.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit),
        MapLoc(MapLoc) {
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(),
                                  Sizes, /*SupportsMapper=*/true) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMapClauseModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMapClauseModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    // Only 'to' and 'tofrom' maps contribute used expressions.
    if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
      return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                         reinterpret_cast<Stmt **>(varlist_end()));
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    auto Children = const_cast<OMPMapClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPThreadLimitClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPThreadLimitClause() : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return ThreadLimit number. Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); } /// Return ThreadLimit number. Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); } child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); } const_child_range children() const { return const_child_range(&ThreadLimit, &ThreadLimit + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_thread_limit; } }; /// This represents 'priority' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task priority(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'priority' with /// single expression 'n'. 
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param Priority Expression associated with this clause.
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(llvm::omp::OMPC_priority, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression of the clause.
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion,
                     SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression (may be null for an empty clause).
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_grainsize;
  }
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
/// /// \code /// #pragma omp taskloop num_tasks(4) /// \endcode /// In this example directive '#pragma omp taskloop' has clause 'num_tasks' /// with single expression '4'. class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *NumTasks = nullptr; /// Set safelen. void setNumTasks(Expr *Size) { NumTasks = Size; } public: /// Build 'num_tasks' clause. /// /// \param Size Expression associated with this clause. /// \param HelperSize Helper grainsize for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNumTasksClause(Expr *Size, Stmt *HelperSize, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) { setPreInitStmt(HelperSize, CaptureRegion); } /// Build an empty clause. explicit OMPNumTasksClause() : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. 
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (may be null for an empty clause).
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_hint;
  }
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size (const overload).
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    // The chunk-size expression is the only child; expose the Expr * slot
    // through a Stmt ** so the generic AST-walking machinery can visit it.
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Locations of modifiers.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
        KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
                  SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  // 'defaultmap' carries no expressions, so there are no children to walk.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'to' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'to' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause.
/// It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                       ArrayRef<SourceLocation> TheMotionModifiersLoc,
                       NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes,
                                  /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                             ArrayRef<SourceLocation> MotionModifiersLoc,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Motion-modifiers for the 'from' clause.
  OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
      OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};

  /// Location of motion-modifiers for the 'from' clause.
  SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param TheMotionModifiers Motion-modifiers.
  /// \param TheMotionModifiersLoc Locations of motion-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
                         ArrayRef<SourceLocation> TheMotionModifiersLoc,
                         NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
                                  /*SupportsMapper=*/true, &MapperQualifierLoc,
                                  &MapperIdInfo) {
    assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
           "Unexpected number of motion modifiers.");
    llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));

    assert(llvm::array_lengthof(MotionModifiersLoc) ==
               TheMotionModifiersLoc.size() &&
           "Unexpected number of motion modifier locations.");
    llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
                                  Sizes, /*SupportsMapper=*/true) {}

  /// Set motion-modifier for the clause.
  ///
  /// \param I index for motion-modifier.
  /// \param T motion-modifier for the clause.
  void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Unexpected index to store motion modifier, exceeds array size.");
    MotionModifiers[I] = T;
  }

  /// Set location for the motion-modifier.
  ///
  /// \param I index for motion-modifier location.
  /// \param TLoc motion-modifier location.
  void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfOMPMotionModifiers &&
           "Index to store motion modifier location exceeds array size.");
    MotionModifiersLoc[I] = TLoc;
  }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param MotionModifiers Motion-modifiers.
  /// \param MotionModifiersLoc Location of motion-modifiers.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
         ArrayRef<SourceLocation> MotionModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  /// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for motion-modifier.
  OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MotionModifiers[Cnt];
  }

  /// Fetches the motion-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for motion-modifier location.
  SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfOMPMotionModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MotionModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of motion-modifiers.
  ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiers);
  }

  /// Fetches ArrayRef of location of motion-modifiers.
  ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MotionModifiersLoc);
  }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause.
/// It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs, Sizes) {
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Three parallel Expr* arrays share this trailing storage: the original
    // variables, their private copies, and the private-copy initializers
    // (see getPrivateCopies()/getInits() below), hence the factor of 3.
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr;
  }
};

/// This represents clause 'use_device_addr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_addr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_addr' with the variables 'a' and 'b'.
class OMPUseDeviceAddrClause final
    : public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
      private llvm::TrailingObjects<
          OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
                                  const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
                                  Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDeviceAddrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDeviceAddrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_use_device_addr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr,
                                  OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPIsDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr; } }; /// This represents clause 'nontemporal' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp simd nontemporal(a) /// \endcode /// In this example directive '#pragma omp simd' has clause 'nontemporal' for /// the variable 'a'. class OMPNontemporalClause final : public OMPVarListClause<OMPNontemporalClause>, private llvm::TrailingObjects<OMPNontemporalClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal, StartLoc, LParenLoc, EndLoc, N) { } /// Build an empty clause. /// /// \param N Number of variables. explicit OMPNontemporalClause(unsigned N) : OMPVarListClause<OMPNontemporalClause>( llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Get the list of privatized copies if the member expression was captured by /// one of the privatization clauses. MutableArrayRef<Expr *> getPrivateRefs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateRefs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPNontemporalClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N); /// Sets the list of references to private copies created in private clauses. /// \param VL List of references.
void setPrivateRefs(ArrayRef<Expr *> VL); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPNontemporalClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range private_refs() { return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()), reinterpret_cast<Stmt **>(getPrivateRefs().end())); } const_child_range private_refs() const { auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nontemporal; } }; /// This represents 'order' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp simd order(concurrent) /// \endcode /// In this example directive '#pragma omp simd' has simple 'order' /// clause with kind 'concurrent'. class OMPOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'order' clause. OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Argument of clause. void setKind(OpenMPOrderClauseKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'order' clause with argument \p A ('concurrent'). /// /// \param A Argument of the clause ('concurrent'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPOrderClause() : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPOrderClauseKind getKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_order; } }; /// This represents the 'init' clause in '#pragma omp ...' directives. /// /// \code /// #pragma omp interop init(target:obj) /// \endcode class OMPInitClause final : public OMPVarListClause<OMPInitClause>, private llvm::TrailingObjects<OMPInitClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of interop variable. SourceLocation VarLoc; bool IsTarget = false; bool IsTargetSync = false; void setInteropVar(Expr *E) { varlist_begin()[0] = E; } void setIsTarget(bool V) { IsTarget = V; } void setIsTargetSync(bool V) { IsTargetSync = V; } /// Sets the location of the interop variable. 
void setVarLoc(SourceLocation Loc) { VarLoc = Loc; } /// Build 'init' clause. /// /// \param IsTarget Uses the 'target' interop-type. /// \param IsTargetSync Uses the 'targetsync' interop-type. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. /// \param N Number of expressions. OMPInitClause(bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, StartLoc, LParenLoc, EndLoc, N), VarLoc(VarLoc), IsTarget(IsTarget), IsTargetSync(IsTargetSync) {} /// Build an empty clause. OMPInitClause(unsigned N) : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, SourceLocation(), SourceLocation(), SourceLocation(), N) { } public: /// Creates a fully specified clause. /// /// \param C AST context. /// \param InteropVar The interop variable. /// \param PrefExprs The list of preference expressions. /// \param IsTarget Uses the 'target' interop-type. /// \param IsTargetSync Uses the 'targetsync' interop-type. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. static OMPInitClause *Create(const ASTContext &C, Expr *InteropVar, ArrayRef<Expr *> PrefExprs, bool IsTarget, bool IsTargetSync, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc); /// Creates an empty clause with \a N expressions. /// /// \param C AST context. /// \param N Number of expression items. static OMPInitClause *CreateEmpty(const ASTContext &C, unsigned N); /// Returns the location of the interop variable. SourceLocation getVarLoc() const { return VarLoc; } /// Returns the interop variable. 
Expr *getInteropVar() { return varlist_begin()[0]; } const Expr *getInteropVar() const { return varlist_begin()[0]; } /// Returns true if interop-type 'target' is used. bool getIsTarget() const { return IsTarget; } /// Returns true if interop-type 'targetsync' is used. bool getIsTargetSync() const { return IsTargetSync; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInitClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } using prefs_iterator = MutableArrayRef<Expr *>::iterator; using const_prefs_iterator = ArrayRef<const Expr *>::iterator; using prefs_range = llvm::iterator_range<prefs_iterator>; using const_prefs_range = llvm::iterator_range<const_prefs_iterator>; prefs_range prefs() { return prefs_range(reinterpret_cast<Expr **>(std::next(varlist_begin())), reinterpret_cast<Expr **>(varlist_end())); } const_prefs_range prefs() const { auto Prefs = const_cast<OMPInitClause *>(this)->prefs(); return const_prefs_range(Prefs.begin(), Prefs.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_init; } }; /// This represents the 'use' clause in '#pragma omp ...' directives. /// /// \code /// #pragma omp interop use(obj) /// \endcode class OMPUseClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Location of interop variable. SourceLocation VarLoc; /// The interop variable. Stmt *InteropVar = nullptr; /// Set the interop variable. void setInteropVar(Expr *E) { InteropVar = E; } /// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Sets the location of the interop variable. void setVarLoc(SourceLocation Loc) { VarLoc = Loc; } public: /// Build 'use' clause with an interop variable expression \a InteropVar. /// /// \param InteropVar The interop variable. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. OMPUseClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_use, StartLoc, EndLoc), LParenLoc(LParenLoc), VarLoc(VarLoc), InteropVar(InteropVar) {} /// Build an empty clause. OMPUseClause() : OMPClause(llvm::omp::OMPC_use, SourceLocation(), SourceLocation()) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the location of the interop variable. SourceLocation getVarLoc() const { return VarLoc; } /// Returns the interop variable. Expr *getInteropVar() const { return cast<Expr>(InteropVar); } child_range children() { return child_range(&InteropVar, &InteropVar + 1); } const_child_range children() const { return const_child_range(&InteropVar, &InteropVar + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_use; } }; /// This represents 'destroy' clause in the '#pragma omp depobj' /// directive or the '#pragma omp interop' directive. /// /// \code /// #pragma omp depobj(a) destroy /// #pragma omp interop destroy(obj) /// \endcode /// In these examples directive '#pragma omp depobj' and '#pragma omp interop' /// have a 'destroy' clause. The 'interop' directive includes an object.
class OMPDestroyClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Location of interop variable. SourceLocation VarLoc; /// The interop variable. Stmt *InteropVar = nullptr; /// Set the interop variable. void setInteropVar(Expr *E) { InteropVar = E; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Sets the location of the interop variable. void setVarLoc(SourceLocation Loc) { VarLoc = Loc; } public: /// Build 'destroy' clause with an interop variable expression \a InteropVar. /// /// \param InteropVar The interop variable. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param VarLoc Location of the interop variable. /// \param EndLoc Ending location of the clause. OMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation VarLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc), LParenLoc(LParenLoc), VarLoc(VarLoc), InteropVar(InteropVar) {} /// Build 'destroy' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {} /// Build an empty clause. OMPDestroyClause() : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) { } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the location of the interop variable. SourceLocation getVarLoc() const { return VarLoc; } /// Returns the interop variable. 
Expr *getInteropVar() const { return cast_or_null<Expr>(InteropVar); } child_range children() { if (InteropVar) return child_range(&InteropVar, &InteropVar + 1); return child_range(child_iterator(), child_iterator()); } const_child_range children() const { if (InteropVar) return const_child_range(&InteropVar, &InteropVar + 1); return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_destroy; } }; /// This represents 'novariants' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp dispatch novariants(a > 5) /// \endcode /// In this example directive '#pragma omp dispatch' has simple 'novariants' /// clause with condition 'a > 5'. class OMPNovariantsClause final : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'novariants' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPNovariantsClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_novariants, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPNovariantsClause() : OMPClause(llvm::omp::OMPC_novariants, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPNovariantsClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_novariants; } }; /// This represents 'nocontext' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp dispatch nocontext(a > 5) /// \endcode /// In this example directive '#pragma omp dispatch' has simple 'nocontext' /// clause with condition 'a > 5'. class OMPNocontextClause final : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'nocontext' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. 
/// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNocontextClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nocontext, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPNocontextClause() : OMPClause(llvm::omp::OMPC_nocontext, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPNocontextClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nocontext; } }; /// This represents 'detach' clause in the '#pragma omp task' directive. /// /// \code /// #pragma omp task detach(evt) /// \endcode /// In this example directive '#pragma omp detach' has simple 'detach' clause /// with the variable 'evt'. class OMPDetachClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression of the 'detach' clause. Stmt *Evt = nullptr; /// Set condition. void setEventHandler(Expr *E) { Evt = E; } /// Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'detach' clause with event-handler \a Evt. /// /// \param Evt Event handler expression. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc), LParenLoc(LParenLoc), Evt(Evt) {} /// Build an empty clause. OMPDetachClause() : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns event-handler expression. Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); } child_range children() { return child_range(&Evt, &Evt + 1); } const_child_range children() const { return const_child_range(&Evt, &Evt + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_detach; } }; /// This represents clause 'inclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan inclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'inclusive' /// with the variables 'a' and 'b'. class OMPInclusiveClause final : public OMPVarListClause<OMPInclusiveClause>, private llvm::TrailingObjects<OMPInclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInclusiveClause(unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPInclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_inclusive; } }; /// This represents clause 'exclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan exclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'exclusive' /// with the variables 'a' and 'b'. 
class OMPExclusiveClause final : public OMPVarListClause<OMPExclusiveClause>, private llvm::TrailingObjects<OMPExclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPExclusiveClause(unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPExclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPExclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_exclusive; } }; /// This represents clause 'uses_allocators' in the '#pragma omp target'-based /// directives. /// /// \code /// #pragma omp target uses_allocators(default_allocator, my_allocator(traits)) /// \endcode /// In this example directive '#pragma omp target' has clause 'uses_allocators' /// with the allocators 'default_allocator' and user-defined 'my_allocator'. class OMPUsesAllocatorsClause final : public OMPClause, private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *, SourceLocation> { public: /// Data for list of allocators. struct Data { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; private: friend class OMPClauseReader; friend TrailingObjects; enum class ExprOffsets { Allocator, AllocatorTraits, Total, }; enum class ParenLocsOffsets { LParen, RParen, Total, }; /// Location of '('. SourceLocation LParenLoc; /// Total number of allocators in the clause. unsigned NumOfAllocators = 0; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of allocators associated with the clause.
OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc), LParenLoc(LParenLoc), NumOfAllocators(N) {} /// Build an empty clause. /// \param N Number of allocators associated with the clause. /// explicit OMPUsesAllocatorsClause(unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(), SourceLocation()), NumOfAllocators(N) {} unsigned numTrailingObjects(OverloadToken<Expr *>) const { return NumOfAllocators * static_cast<int>(ExprOffsets::Total); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Sets the allocators data for the clause. void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data); public: /// Creates clause with a list of allocators \p Data. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param Data List of allocators. static OMPUsesAllocatorsClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data); /// Creates an empty clause with the place for \p N allocators. /// /// \param C AST context. /// \param N The number of allocators. static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of allocators associated with the clause. unsigned getNumberOfAllocators() const { return NumOfAllocators; } /// Returns data for the specified allocator.
OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const; // Iterators child_range children() { Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>()); return child_range(Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } const_child_range children() const { Stmt *const *Begin = reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>()); return const_child_range( Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_uses_allocators; } }; /// This represents clause 'affinity' in the '#pragma omp task'-based /// directives. /// /// \code /// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i]) /// \endcode /// In this example directive '#pragma omp task' has clause 'affinity' with the /// affinity modifier 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]' /// and 'c[i]'. class OMPAffinityClause final : public OMPVarListClause<OMPAffinityClause>, private llvm::TrailingObjects<OMPAffinityClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':' symbol. SourceLocation ColonLoc; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param N Number of locators associated with the clause. OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause.
/// \param N Number of locators asssociated with the clause. /// explicit OMPAffinityClause(unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the affinity modifier for the clause, if any. void setModifier(Expr *E) { getTrailingObjects<Expr *>()[varlist_size()] = E; } /// Sets the location of ':' symbol. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a modifier a list of locator items. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param Locators List of locator items. static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Creates an empty clause with the place for \p N locator items. /// /// \param C AST context. /// \param N The number of locator items. static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets affinity modifier. Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; } Expr *getModifier() const { return getTrailingObjects<Expr *>()[varlist_size()]; } /// Gets the location of ':' symbol. SourceLocation getColonLoc() const { return ColonLoc; } // Iterators child_range children() { int Offset = getModifier() ? 
1 : 0; return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + Offset)); } const_child_range children() const { auto Children = const_cast<OMPAffinityClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_affinity; } }; /// This represents 'filter' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp masked filter(tid) /// \endcode /// In this example directive '#pragma omp masked' has 'filter' clause with /// thread id. class OMPFilterClause final : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Express of the 'filter' clause. Stmt *ThreadID = nullptr; /// Sets the thread identifier. void setThreadID(Expr *TID) { ThreadID = TID; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'filter' clause with thread-id \a ThreadID. /// /// \param ThreadID Thread identifier. /// \param HelperE Helper expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPFilterClause(Expr *ThreadID, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_filter, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadID(ThreadID) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. 
OMPFilterClause()
      : OMPClause(llvm::omp::OMPC_filter, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return thread identifier.
  Expr *getThreadID() { return cast<Expr>(ThreadID); }

  /// Return thread identifier.
  Expr *getThreadID() const { return cast<Expr>(ThreadID); }

  child_range children() { return child_range(&ThreadID, &ThreadID + 1); }

  const_child_range children() const {
    return const_child_range(&ThreadID, &ThreadID + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_filter;
  }
};

/// This class implements a simple visitor for OMPClause
/// subclasses.
// The Visit##Class methods are generated from OMP.inc (X-macro pattern):
// each CLAUSE_CLASS expansion produces a default visitor that dispatches to
// the derived ImplClass.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
#define PTR(CLASS) Ptr<CLASS>
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class)                                         \
  RetTy Visit##Class(PTR(Class) S) { DISPATCH(Class); }
#include "llvm/Frontend/OpenMP/OMP.inc"

  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class)                                         \
  case llvm::omp::Clause::Enum:                                                \
    return Visit##Class(static_cast<PTR(Class)>(S));
#define CLAUSE_NO_CLASS(Enum, Str)                                             \
  case llvm::omp::Clause::Enum:                                                \
    break;
#include "llvm/Frontend/OpenMP/OMP.inc"
    }
  }
  // Base case, ignore it. :)
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};

template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>;

template <class ImplClass, typename RetTy = void>
class OMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor
    : public OMPClauseVisitorBase<ImplClass, const_ptr, RetTy> {};

// Pretty-prints OpenMP clauses to a raw_ostream; one Visit method per clause
// class is declared via the OMP.inc X-macro include below.
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);
  /// Process motion clauses.
  template <typename T> void VisitOMPMotionClause(T *Node);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S);
#include "llvm/Frontend/OpenMP/OMP.inc"
};

struct OMPTraitProperty {
  llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;

  /// The raw string as we parsed it. This is needed for the `isa` trait set
  /// (which accepts anything) and (later) extensions.
  StringRef RawString;
};
struct OMPTraitSelector {
  Expr *ScoreOrCondition = nullptr;
  llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
  llvm::SmallVector<OMPTraitProperty, 1> Properties;
};
struct OMPTraitSet {
  llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
  llvm::SmallVector<OMPTraitSelector, 2> Selectors;
};

/// Helper data structure representing the traits in a match clause of an
/// `declare variant` or `metadirective`. The outer level is an ordered
/// collection of selector sets, each with an associated kind and an ordered
/// collection of selectors. A selector has a kind, an optional
/// score/condition, and an ordered collection of properties.
class OMPTraitInfo {
  /// Private constructor accessible only by ASTContext.
  OMPTraitInfo() {}
  friend class ASTContext;

public:
  /// Reconstruct a (partial) OMPTraitInfo object from a mangled name.
  OMPTraitInfo(StringRef MangledName);

  /// The outermost level of selector sets.
  llvm::SmallVector<OMPTraitSet, 2> Sets;

  // Returns true if \p Cond holds for any score/condition expression in any
  // selector; the bool argument tells the callback whether the expression is
  // a score (true) or a user condition (false).
  bool anyScoreOrCondition(
      llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) {
    return llvm::any_of(Sets, [&](OMPTraitSet &Set) {
      return llvm::any_of(
          Set.Selectors, [&](OMPTraitSelector &Selector) {
            return Cond(Selector.ScoreOrCondition,
                        /* IsScore */ Selector.Kind !=
                            llvm::omp::TraitSelector::user_condition);
          });
    });
  }

  /// Create a variant match info object from this trait info object. While the
  /// former is a flat representation the actual main difference is that the
  /// latter uses clang::Expr to store the score/condition while the former is
  /// independent of clang. Thus, expressions and conditions are evaluated in
  /// this method.
  void getAsVariantMatchInfo(ASTContext &ASTCtx,
                             llvm::omp::VariantMatchInfo &VMI) const;

  /// Return a string representation identifying this context selector.
  std::string getMangledName() const;

  /// Check the extension trait \p TP is active.
  bool isExtensionActive(llvm::omp::TraitProperty TP) {
    for (const OMPTraitSet &Set : Sets) {
      // Extensions only live under the 'implementation' set ...
      if (Set.Kind != llvm::omp::TraitSet::implementation)
        continue;
      for (const OMPTraitSelector &Selector : Set.Selectors) {
        // ... inside the 'extension' selector.
        if (Selector.Kind !=
            llvm::omp::TraitSelector::implementation_extension)
          continue;
        for (const OMPTraitProperty &Property : Selector.Properties) {
          if (Property.Kind == TP)
            return true;
        }
      }
    }
    return false;
  }

  /// Print a human readable representation into \p OS.
  void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);

/// Clang specific specialization of the OMPContext to lookup target features.
struct TargetOMPContext final : public llvm::omp::OMPContext {
  TargetOMPContext(ASTContext &ASTCtx,
                   std::function<void(StringRef)> &&DiagUnknownTrait,
                   const FunctionDecl *CurrentFunctionDecl);
  virtual ~TargetOMPContext() = default;

  /// See llvm::omp::OMPContext::matchesISATrait
  bool matchesISATrait(StringRef RawString) const override;

private:
  std::function<bool(StringRef)> FeatureValidityCheck;
  std::function<void(StringRef)> DiagUnknownTrait;
  llvm::StringMap<bool> FeatureMap;
};

/// Contains data for OpenMP directives: clauses, children
/// expressions/statements (helpers for codegen) and associated statement, if
/// any.
// Trailing storage layout: NumClauses OMPClause* pointers, followed by
// NumChildren Stmt* pointers, with one extra Stmt* slot at index NumChildren
// for the associated statement when HasAssociatedStmt is set (see
// setAssociatedStmt/getAssociatedStmt below).
class OMPChildren final
    : private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> {
  friend TrailingObjects;
  friend class OMPClauseReader;
  friend class OMPExecutableDirective;
  template <typename T> friend class OMPDeclarativeDirective;

  /// Numbers of clauses.
  unsigned NumClauses = 0;
  /// Number of child expressions/stmts.
  unsigned NumChildren = 0;
  /// true if the directive has associated statement.
  bool HasAssociatedStmt = false;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<OMPClause *>) const {
    return NumClauses;
  }

  OMPChildren() = delete;

  OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt)
      : NumClauses(NumClauses), NumChildren(NumChildren),
        HasAssociatedStmt(HasAssociatedStmt) {}

  static size_t size(unsigned NumClauses, bool HasAssociatedStmt,
                     unsigned NumChildren);

  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses);
  static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S,
                             unsigned NumChildren = 0);
  static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses,
                                  bool HasAssociatedStmt = false,
                                  unsigned NumChildren = 0);

public:
  unsigned getNumClauses() const { return NumClauses; }
  unsigned getNumChildren() const { return NumChildren; }
  bool hasAssociatedStmt() const { return HasAssociatedStmt; }

  /// Set associated statement.
  void setAssociatedStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[NumChildren] = S;
  }

  void setChildren(ArrayRef<Stmt *> Children);

  /// Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPChildren *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    return getTrailingObjects<Stmt *>()[NumChildren];
  }

  /// Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(),
                                     NumClauses);
  }
  ArrayRef<OMPClause *> getClauses() const {
    return const_cast<OMPChildren *>(this)->getClauses();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  // Walks the nest of CapturedStmts (one per capture region, outermost first)
  // until the region of the requested kind is reached.
  const CapturedStmt *
  getCapturedStmt(OpenMPDirectiveKind RegionKind,
                  ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
    assert(llvm::any_of(
               CaptureRegions,
               [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *
  getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) {
    assert(hasAssociatedStmt() && "Must have associated captured statement.");
    assert(!CaptureRegions.empty() &&
           "At least one captured statement must be provided.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    return CS;
  }

  const CapturedStmt *
  getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
    return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt(
        CaptureRegions);
  }

  MutableArrayRef<Stmt *> getChildren();
  ArrayRef<Stmt *> getChildren() const {
    return const_cast<OMPChildren *>(this)->getChildren();
  }

  // Strips off all enclosing CapturedStmt wrappers and returns the underlying
  // user statement (or the associated statement itself if not captured).
  Stmt *getRawStmt() {
    assert(HasAssociatedStmt &&
           "Expected directive with the associated statement.");
    if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) {
      Stmt *S = nullptr;
      do {
        S = CS->getCapturedStmt();
        CS = dyn_cast<CapturedStmt>(S);
      } while (CS);
      return S;
    }
    return getAssociatedStmt();
  }
  const Stmt *getRawStmt() const {
    return const_cast<OMPChildren *>(this)->getRawStmt();
  }

  // Returns the associated statement as a one-element child range (empty
  // range when there is no associated statement).
  Stmt::child_range getAssociatedStmtAsRange() {
    if (!HasAssociatedStmt)
      return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator());
    return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren],
                             &getTrailingObjects<Stmt *>()[NumChildren + 1]);
  }
};

} // namespace clang

#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. % % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *blur_view, *edge_view, *image_view; double normalize, **kernel; Image *blur_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, blur, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory( (size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]+=(double) (1.0-normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } 
k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(blur_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by sharpening more % intensely near image edges and less intensely far from edges. 
We sharpen the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you. % % The format of the AdaptiveSharpenImage method is: % % Image *AdaptiveSharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveSharpenImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *sharp_view, *edge_view, *image_view; double normalize, **kernel; Image *sharp_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sharp_image=CloneImage(image,0,0,MagickTrue,exception); if (sharp_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(sharp_image); if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, sharp, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]=(double) ((-2.0)*normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively sharpen image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); sharp_view=AcquireAuthenticCacheView(sharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sharp_image,sharp_image->rows,1) #endif for (y=0; y < (ssize_t) sharp_image->rows; y++) { register const Quantum *magick_restrict r; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) sharp_image->columns; x++) { register const Quantum *magick_restrict p; register ssize_t i; ssize_t center, j; j=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait sharp_traits, traits; register const double *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); sharp_traits=GetPixelChannelTraits(sharp_image,channel); if ((traits == UndefinedPixelTrait) || (sharp_traits == UndefinedPixelTrait)) continue; if ((sharp_traits & CopyPixelTrait) != 0) { SetPixelChannel(sharp_image,channel,p[center+i],q); 
continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((sharp_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(sharp_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sharp_image->type=image->type; sharp_view=DestroyCacheView(sharp_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) sharp_image=DestroyImage(sharp_image); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlurImage() blurs an image. We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. 
Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. 
%
%  The format of the ConvolveImage method is:
%
%      Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o kernel: the filtering kernel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *result;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the accelerated (OpenCL) implementation when one is available.
  */
  result=AccelerateConvolveImage(image,kernel_info,exception);
  if (result != (Image *) NULL)
    return(result);
#endif
  /*
    A convolution is a single ConvolveMorphology iteration with the given
    kernel; delegate to the morphology engine.
  */
  result=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  return(result);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull() performs one step of the Crimmins complementary hulling operator in
  the (x_offset,y_offset) direction.  f and g are (columns+2) x (rows+2)
  buffers with a one-pixel border (grounded by the columns+2 stride and the
  (2*y+1) row offset below); polarity > 0 raises dark pixels, polarity <= 0
  lowers bright ones.  The result is written back into f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /*
    First pass: compare each interior pixel of f against its neighbor at the
    given offset (r) and nudge it by one scaled step into g.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* skip the border column at both row ends */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second pass: compare the intermediate result in g against neighbors on
    both sides of the offset (r and s) and write the hulled value back to f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image
*image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* the four hulling directions; each is applied forward and backward */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both scratch planes carry a one-pixel border
    around the image, hence the +2 in each dimension.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one pixel channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy this channel into the bordered scratch plane; j tracks the
      destination index and skips the border column at each row end.
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /*
      Crimmins hulling: for each direction, raise dark pixels (polarity 1)
      forward and backward, then lower bright pixels (polarity -1) backward
      and forward.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Copy the despeckled channel back out of the bordered plane.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* start from a zeroed kernel and fill in only the fields used below */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Laplacian-style edge kernel: every tap is -1 except the center, which is
    set to width*height-1 so the taps sum to zero.  Note i == width*height
    after the loop, so values[i/2] addresses the center element.
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a Gaussian-weighted kernel that is negative above/left of the
    center and positive below/right; k tracks the anti-diagonal (it starts
    at j and decrements per row) and all taps off it are zeroed, leaving a
    single directional ridge that produces the relief effect.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize so the kernel taps sum to 1 (PerceptibleReciprocal guards a
    near-zero sum).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  Image
    *blur;

  KernelInfo
    *kernel;

  /*
    Validate arguments, then convolve with a single 2-D "gaussian" kernel
    described by a geometry string.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel=AcquireKernelInfo(geometry,exception);
  if (kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur=ConvolveImage(image,kernel,exception);
  kernel=DestroyKernelInfo(kernel);
  return(blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMeanLuma() returns the Rec709 luma of a mean-pixel vector (one double
  per channel) using the image's channel map offsets.
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* each quadrant window is (radius+1) pixels on a side */
  width=(size_t) radius+1;
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      /*
        Examine the four quadrant windows touching this pixel and remember
        (in target) the one with the smallest luma variance.
      */
      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            /* upper-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            /* upper-right quadrant */
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            /* lower-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            /* lower-right quadrant: anchored at (x,y) */
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* per-channel mean over the window */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* luma variance of the window about the mean pixel */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* an early break above means a pixel fetch failed */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /* write the smoothed value sampled at the winning quadrant's center */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    The blur half-width is 0.2% of the largest dimension per percent of
    radius; each per-thread scan line carries `width` padding on both ends.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: blur the luma of each column into interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* each OpenMP thread owns one scan-line slice of the shared buffer */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* tent-filter weights: rise over the first half of the window,
           fall over the second */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage rows and apply the unsharp-style boost.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        /* NOTE(review): divides by srcVal — appears unguarded when the
           source luma is exactly 0; confirm upstream clamping */
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
            q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
            mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*
            mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n B l u r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%      Image *MotionBlurImage(const Image *image,const double radius,
%        const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    normalize;

  register ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  /* half-Gaussian taps, then normalize so they sum to 1 */
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the per-tap pixel offsets along the motion direction.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
  if (blur_image != (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending: accumulate kernel-weighted samples along
              the motion vector.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each sample by its alpha and renormalize.
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%
processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImages method is:
%
%    Image *PreviewImages(const Image *image,const PreviewType preview,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  extern const char
    DefaultTileFrame[];

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Each tile varies one parameter; these accumulators are advanced once
    per loop iteration at the bottom of the tile loop.
  */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        /* center tile: the unmodified original thumbnail */
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /* apply despeckle i times: i-1 passes in-place, final pass kept */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          NOTE(review): case 4 is absent, so i==4 falls into the default
          branch that overwrites thumbnail->magick with "NULL" -- looks
          like an off-by-one in the noise-name table; confirm against the
          noise-type enumeration.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /*
          NOTE(review): BilevelImage is applied to `thumbnail`, not to the
          freshly cloned `preview_image` -- the tile shown would then be an
          unthresholded clone; confirm whether the target is wrong.
        */
        (void) BilevelImage(thumbnail,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        geometry.width=(size_t) (2*i+2);
        geometry.height=(size_t) (2*i+2);
        geometry.x=(i-1)/2;
        geometry.y=(i-1)/2;
        (void) RaiseImage(preview_image,&geometry,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
          geometry.height,(double) geometry.x,(double) geometry.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        /* NOTE(review): label says "charcoal" for the oil-paint tile --
           presumably a copy-paste slip; confirm intended label text. */
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        /* close the descriptor; the -1 folds close()'s 0/-1 result into the
           "no open file" sentinel */
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        /* round-trip through the JPEG coder to show quality degradation */
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a radial blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%    Image *RotationalBlurImage(const Image *image,const double angle,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o angle: the angle of the radial blur.
%
%    o blur: the blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /* rotation center and the number of angular samples n, scaled with the
     blur angle and the image diagonal */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* precompute the rotation table, centered so samples straddle the pixel */
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      /* fewer angular samples near the rim, clamped to [1, n-1] */
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) ==
             UndefinedPixelTrait) || (channel == AlphaPixelChannel))
          {
            /* opaque image (or the alpha channel itself): plain average of
               the samples rotated about blur_center */
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view,(ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /* alpha-weighted average for images with an alpha channel */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view,(ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* reuses BlurImageTag for progress reporting */
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e l e c t i v e B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blur pixels within a contrast threshold.
%  It is similar to the unsharpen mask that sharpens everything with contrast
%  above a certain threshold.
%
%  The format of the SelectiveBlurImage method is:
%
%    Image *SelectiveBlurImage(const Image *image,const double radius,
%      const double sigma,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* 2-D Gaussian kernel: width x width taps (note the width*width sizing) */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /*
        Dump the kernel to the transform log, one row per line.
      */
      char
        format[MagickPathExtent],
        *message;

      register const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /* grayscale copy used to measure local contrast */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /* offset (in Quantum units) of the neighborhood's center pixel within the
     (columns+width) x width window fetched per row below */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict l,
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      register ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blend path: only neighbors whose luminance contrast with
              the center pixel is below threshold contribute.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* no neighbor qualified: keep the source pixel unchanged */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Blend (alpha-weighted) path.  NOTE(review): contrast here is
          measured on `image` while the non-blend path above uses
          `luminance_image` -- confirm the asymmetry is intentional.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect.  You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%    Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%      const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation: Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); linear_image=CloneImage(image,0,0,MagickTrue,exception); shade_image=CloneImage(image,0,0,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (shade_image != (Image *) NULL) shade_image=DestroyImage(shade_image); return((Image *) NULL); } if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); shade_image=DestroyImage(shade_image); return((Image *) NULL); } /* Compute the light vector. */ light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.z=(double) QuantumRange*sin(DegreesToRadians(elevation)); /* Shade image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); shade_view=AcquireAuthenticCacheView(shade_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(linear_image,shade_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { double distance, normal_distance, shade; PrimaryInfo normal; register const Quantum *magick_restrict center, *magick_restrict p, *magick_restrict post, *magick_restrict pre; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3, exception); q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Shade this row of pixels. */ normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */ for (x=0; x < (ssize_t) linear_image->columns; x++) { register ssize_t i; /* Determine the surface normal and compute shading. 
*/ pre=p+GetPixelChannels(linear_image); center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image); post=center+(linear_image->columns+2)*GetPixelChannels(linear_image); normal.x=(double) ( GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+ GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+ GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))); normal.y=(double) ( GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+ GetPixelIntensity(linear_image,post)+ GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))- GetPixelIntensity(linear_image,pre)- GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))); if ((fabs(normal.x) <= MagickEpsilon) && (fabs(normal.y) <= MagickEpsilon)) shade=light.z; else { shade=0.0; distance=normal.x*light.x+normal.y*light.y+normal.z*light.z; if (distance > MagickEpsilon) { normal_distance=normal.x*normal.x+normal.y*normal.y+ normal.z*normal.z; if (normal_distance > (MagickEpsilon*MagickEpsilon)) shade=distance/sqrt((double) normal_distance); } } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel; PixelTrait shade_traits, traits; channel=GetPixelChannelChannel(linear_image,i); traits=GetPixelChannelTraits(linear_image,channel); shade_traits=GetPixelChannelTraits(shade_image,channel); if ((traits == UndefinedPixelTrait) || (shade_traits == UndefinedPixelTrait)) continue; if ((shade_traits & CopyPixelTrait) != 0) { SetPixelChannel(shade_image,channel,center[i],q); continue; } if ((traits & UpdatePixelTrait) == 0) { SetPixelChannel(shade_image,channel,center[i],q); continue; } if (gray != MagickFalse) { 
SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q); continue; } SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade* center[i]),q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(shade_image); } if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } shade_view=DestroyCacheView(shade_view); image_view=DestroyCacheView(image_view); linear_image=DestroyImage(linear_image); if (status == MagickFalse) shade_image=DestroyImage(shade_image); return(shade_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SharpenImage() sharpens the image. We convolve the image with a Gaussian % operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SharpenImage() selects a suitable radius for you. % % Using a separable kernel would be faster, but the negative weights cancel % out on the corners of the kernel producing often undesirable ringing in the % filtered result; this can be avoided by using a 2D gaussian shaped image % sharpening kernel instead. % % The format of the SharpenImage method is: % % Image *SharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *SharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { double gamma, normalize; Image *sharp_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel_info->width,kernel_info->height* sizeof(*kernel_info->values))); if (kernel_info->values == (MagickRealType *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } normalize=0.0; j=(ssize_t) (kernel_info->width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0* MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel_info->values[i]; i++; } } kernel_info->values[i/2]=(double) ((-2.0)*normalize); normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; sharp_image=ConvolveImage(image,kernel_info,exception); 
kernel_info=DestroyKernelInfo(kernel_info); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p r e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpreadImage() is a special effects method that randomly displaces each % pixel in a square area defined by the radius parameter. % % The format of the SpreadImage method is: % % Image *SpreadImage(const Image *image, % const PixelInterpolateMethod method,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: intepolation method. % % o radius: choose a random pixel in a neighborhood of this extent. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpreadImage(const Image *image, const PixelInterpolateMethod method,const double radius, ExceptionInfo *exception) { #define SpreadImageTag "Spread/Image" CacheView *image_view, *spread_view; Image *spread_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; size_t width; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize spread image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); spread_image=CloneImage(image,0,0,MagickTrue,exception); if (spread_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse) { spread_image=DestroyImage(spread_image); return((Image *) NULL); } /* Spread image. 
*/ status=MagickTrue; progress=0; width=GetOptimalKernelWidth1D(radius,0.5); random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); spread_view=AcquireAuthenticCacheView(spread_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,spread_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PointInfo point; point.x=GetPseudoRandomValue(random_info[id]); point.y=GetPseudoRandomValue(random_info[id]); status=InterpolatePixelChannels(image,image_view,spread_image,method, (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q, exception); if (status == MagickFalse) break; q+=GetPixelChannels(spread_image); } if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } spread_view=DestroyCacheView(spread_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) spread_image=DestroyImage(spread_image); return(spread_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n s h a r p M a s k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnsharpMaskImage() sharpens one or more image channels. 
We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. % % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double gain,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the difference gain. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; double quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold, exception); if (unsharp_image != (Image *) NULL) return(unsharp_image); #endif unsharp_image=BlurImage(image,radius,sigma,exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(double) QuantumRange*threshold; /* Unsharp-mask image.
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits, unsharp_traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); unsharp_traits=GetPixelChannelTraits(unsharp_image,channel); if ((traits == UndefinedPixelTrait) || (unsharp_traits == UndefinedPixelTrait)) continue; if ((unsharp_traits & CopyPixelTrait) != 0) { SetPixelChannel(unsharp_image,channel,p[i],q); continue; } pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q); if (fabs(2.0*pixel) < quantum_threshold) pixel=(double) p[i]; else pixel=(double) p[i]+gain*pixel; SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(unsharp_image); } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); 
image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
GB_unaryop__abs_uint16_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint16_fp64
// op(A') function: GB_tran__abs_uint16_fp64

// C type: uint16_t
// A type: double
// cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop: cij = aij

// NOTE: although this is the ABS operator, the result type uint16_t is
// unsigned, so ABS degenerates to the identity (cij = aij); the actual
// fp64 -> uint16 conversion is done entirely by GB_CAST_UNSIGNED below.

// type of the A matrix entries
#define GB_ATYPE \
double

// type of the C matrix entries
#define GB_CTYPE \
uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]

// address of the C(i,j) entry
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
z = x ;

// casting
#define GB_CASTING(z, x) \
uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cast each of the anz entries of Ax from double to uint16_t and store the
// result in Cx, in parallel with nthreads OpenMP threads.  Returns
// GrB_NO_VALUE when the operator/type combination is disabled at compile
// time (caller then falls back to the generic worker), GrB_SUCCESS otherwise.
GrB_Info GB_unop__abs_uint16_fp64
(
    uint16_t *restrict Cx,      // output array, anz entries are written
    const double *restrict Ax,  // input array, anz entries are read
    int64_t anz,                // number of entries to convert
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c, which
// expands using the GB_* macros defined above (phase 2 of the two-phase
// transpose).  Same GrB_NO_VALUE / GrB_SUCCESS convention as above.
GrB_Info GB_tran__abs_uint16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_3x3_pack4to1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 6; int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 64, inch, 16u, elempack, opt.workspace_allocator); conv3x3s1_winograd63_transform_input_pack4_bf16s_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 
12) / 8 + (tiles % 12 % 8) / 4 + tiles % 12 % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n" "sub %0, %0, #128 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v19.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); r0 += 
bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "sub %0, %0, #64 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d16-d19}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d20-d23}, [%0 :128] \n" "sub %0, %0, #96 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d16-d17}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" "vst1.f32 {d22-d23}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128] \n" "sub %0, %0, #32 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); float* output4_tm = top_blob_tm.channel(p + 4); float* output5_tm = top_blob_tm.channel(p + 5); float* output6_tm = top_blob_tm.channel(p + 6); float* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 
{v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v23.4s, v0.4s, v5.s[1] \n" "fmla v26.4s, v0.4s, v5.s[2] \n" "fmla v29.4s, v0.4s, v5.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v21.4s, v1.4s, v5.s[0] \n" "fmla v24.4s, v1.4s, v5.s[1] \n" "fmla v27.4s, v1.4s, v5.s[2] \n" "fmla v30.4s, v1.4s, v5.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "fmla v22.4s, v2.4s, v5.s[0] \n" "fmla v25.4s, v2.4s, v5.s[1] \n" "fmla v28.4s, v2.4s, v5.s[2] \n" "fmla v31.4s, v2.4s, v5.s[3] \n" "fmla v8.4s, v3.4s, v6.s[0] \n" "fmla v11.4s, v3.4s, v6.s[1] \n" "fmla v14.4s, v3.4s, v6.s[2] \n" "fmla v17.4s, v3.4s, v6.s[3] \n" "fmla v20.4s, v3.4s, v7.s[0] \n" "fmla v23.4s, v3.4s, v7.s[1] \n" "fmla v26.4s, v3.4s, v7.s[2] \n" "fmla v29.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v9.4s, v0.4s, v6.s[0] \n" "fmla v12.4s, v0.4s, v6.s[1] \n" "fmla v15.4s, v0.4s, v6.s[2] \n" "fmla v18.4s, v0.4s, v6.s[3] \n" "fmla v21.4s, v0.4s, v7.s[0] \n" "fmla v24.4s, v0.4s, v7.s[1] \n" "fmla v27.4s, v0.4s, v7.s[2] \n" "fmla v30.4s, v0.4s, v7.s[3] \n" "fmla v10.4s, v1.4s, v6.s[0] \n" "fmla v13.4s, v1.4s, v6.s[1] \n" "fmla v16.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v22.4s, v1.4s, v7.s[0] \n" "fmla v25.4s, v1.4s, v7.s[1] \n" "fmla v28.4s, v1.4s, v7.s[2] \n" "fmla v31.4s, v1.4s, v7.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "fmla v8.4s, v2.4s, v4.s[0] \n" "fmla v11.4s, v2.4s, v4.s[1] \n" "fmla v14.4s, v2.4s, v4.s[2] \n" "fmla 
v17.4s, v2.4s, v4.s[3] \n" "fmla v20.4s, v2.4s, v5.s[0] \n" "fmla v23.4s, v2.4s, v5.s[1] \n" "fmla v26.4s, v2.4s, v5.s[2] \n" "fmla v29.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v4.s[0] \n" "fmla v12.4s, v3.4s, v4.s[1] \n" "fmla v15.4s, v3.4s, v4.s[2] \n" "fmla v18.4s, v3.4s, v4.s[3] \n" "fmla v21.4s, v3.4s, v5.s[0] \n" "fmla v24.4s, v3.4s, v5.s[1] \n" "fmla v27.4s, v3.4s, v5.s[2] \n" "fmla v30.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v10.4s, v0.4s, v4.s[0] \n" "fmla v13.4s, v0.4s, v4.s[1] \n" "fmla v16.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v22.4s, v0.4s, v5.s[0] \n" "fmla v25.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v31.4s, v0.4s, v5.s[3] \n" "fmla v8.4s, v1.4s, v6.s[0] \n" "fmla v11.4s, v1.4s, v6.s[1] \n" "fmla v14.4s, v1.4s, v6.s[2] \n" "fmla v17.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v23.4s, v1.4s, v7.s[1] \n" "fmla v26.4s, v1.4s, v7.s[2] \n" "fmla v29.4s, v1.4s, v7.s[3] \n" "fmla v9.4s, v2.4s, v6.s[0] \n" "fmla v12.4s, v2.4s, v6.s[1] \n" "fmla v15.4s, v2.4s, v6.s[2] \n" "fmla v18.4s, v2.4s, v6.s[3] \n" "fmla v21.4s, v2.4s, v7.s[0] \n" "fmla v24.4s, v2.4s, v7.s[1] \n" "fmla v27.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v10.4s, v3.4s, v6.s[0] \n" "fmla v13.4s, v3.4s, v6.s[1] \n" "fmla v16.4s, v3.4s, v6.s[2] \n" "fmla v19.4s, v3.4s, v6.s[3] \n" "fmla v22.4s, v3.4s, v7.s[0] \n" "fmla v25.4s, v3.4s, v7.s[1] \n" "fmla v28.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" "st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n" "st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n" "st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n" "st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 
"=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v18.4s, v0.4s, v4.s[1] \n" "fmla v20.4s, v0.4s, v4.s[2] \n" "fmla v22.4s, v0.4s, v4.s[3] \n" "fmla v24.4s, v0.4s, v5.s[0] \n" "fmla v26.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v30.4s, v0.4s, v5.s[3] \n" "fmla v17.4s, v1.4s, v4.s[0] \n" "fmla v19.4s, v1.4s, v4.s[1] \n" "fmla v21.4s, v1.4s, v4.s[2] \n" "fmla v23.4s, v1.4s, v4.s[3] \n" "fmla v25.4s, v1.4s, v5.s[0] \n" "fmla v27.4s, v1.4s, v5.s[1] \n" "fmla v29.4s, v1.4s, v5.s[2] \n" 
"fmla v31.4s, v1.4s, v5.s[3] \n" "fmla v16.4s, v2.4s, v6.s[0] \n" "fmla v18.4s, v2.4s, v6.s[1] \n" "fmla v20.4s, v2.4s, v6.s[2] \n" "fmla v22.4s, v2.4s, v6.s[3] \n" "fmla v24.4s, v2.4s, v7.s[0] \n" "fmla v26.4s, v2.4s, v7.s[1] \n" "fmla v28.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v17.4s, v3.4s, v6.s[0] \n" "fmla v19.4s, v3.4s, v6.s[1] \n" "fmla v21.4s, v3.4s, v6.s[2] \n" "fmla v23.4s, v3.4s, v6.s[3] \n" "fmla v25.4s, v3.4s, v7.s[0] \n" "fmla v27.4s, v3.4s, v7.s[1] \n" "fmla v29.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v12.4s, v8.s[0] \n" "fmla v18.4s, v12.4s, v8.s[1] \n" "fmla v20.4s, v12.4s, v8.s[2] \n" "fmla v22.4s, v12.4s, v8.s[3] \n" "fmla v24.4s, v12.4s, v9.s[0] \n" "fmla v26.4s, v12.4s, v9.s[1] \n" "fmla v28.4s, v12.4s, v9.s[2] \n" "fmla v30.4s, v12.4s, v9.s[3] \n" "fmla v17.4s, v13.4s, v8.s[0] \n" "fmla v19.4s, v13.4s, v8.s[1] \n" "fmla v21.4s, v13.4s, v8.s[2] \n" "fmla v23.4s, v13.4s, v8.s[3] \n" "fmla v25.4s, v13.4s, v9.s[0] \n" "fmla v27.4s, v13.4s, v9.s[1] \n" "fmla v29.4s, v13.4s, v9.s[2] \n" "fmla v31.4s, v13.4s, v9.s[3] \n" "fmla v16.4s, v14.4s, v10.s[0] \n" "fmla v18.4s, v14.4s, v10.s[1] \n" "fmla v20.4s, v14.4s, v10.s[2] \n" "fmla v22.4s, v14.4s, v10.s[3] \n" "fmla v24.4s, v14.4s, v11.s[0] \n" "fmla v26.4s, v14.4s, v11.s[1] \n" "fmla v28.4s, v14.4s, v11.s[2] \n" "fmla v30.4s, v14.4s, v11.s[3] \n" "fmla v17.4s, v15.4s, v10.s[0] \n" "fmla v19.4s, v15.4s, v10.s[1] \n" "fmla v21.4s, v15.4s, v10.s[2] \n" "fmla v23.4s, v15.4s, v10.s[3] \n" "fmla v25.4s, v15.4s, v11.s[0] \n" "fmla v27.4s, v15.4s, v11.s[1] \n" "fmla v29.4s, v15.4s, v11.s[2] \n" "fmla v31.4s, v15.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" "st1 {v20.4s, v21.4s}, [%3], #32 \n" "st1 {v22.4s, v23.4s}, [%4], #32 \n" "st1 
{v24.4s, v25.4s}, [%5], #32 \n" "st1 {v26.4s, v27.4s}, [%6], #32 \n" "st1 {v28.4s, v29.4s}, [%7], #32 \n" "st1 {v30.4s, v31.4s}, [%8], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v17.4s, v0.4s, v4.s[1] \n" "fmla v18.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v21.4s, v0.4s, v5.s[1] \n" "fmla v22.4s, v0.4s, v5.s[2] \n" "fmla v23.4s, v0.4s, v5.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v1.4s, v6.s[0] \n" "fmla v17.4s, v1.4s, v6.s[1] \n" "fmla v18.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v21.4s, v1.4s, v7.s[1] \n" 
"fmla v22.4s, v1.4s, v7.s[2] \n" "fmla v23.4s, v1.4s, v7.s[3] \n" "fmla v16.4s, v2.4s, v8.s[0] \n" "fmla v17.4s, v2.4s, v8.s[1] \n" "fmla v18.4s, v2.4s, v8.s[2] \n" "fmla v19.4s, v2.4s, v8.s[3] \n" "fmla v20.4s, v2.4s, v9.s[0] \n" "fmla v21.4s, v2.4s, v9.s[1] \n" "fmla v22.4s, v2.4s, v9.s[2] \n" "fmla v23.4s, v2.4s, v9.s[3] \n" "fmla v16.4s, v3.4s, v10.s[0] \n" "fmla v17.4s, v3.4s, v10.s[1] \n" "fmla v18.4s, v3.4s, v10.s[2] \n" "fmla v19.4s, v3.4s, v10.s[3] \n" "fmla v20.4s, v3.4s, v11.s[0] \n" "fmla v21.4s, v3.4s, v11.s[1] \n" "fmla v22.4s, v3.4s, v11.s[2] \n" "fmla v23.4s, v3.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" "st1 {v18.4s}, [%3], #16 \n" "st1 {v19.4s}, [%4], #16 \n" "st1 {v20.4s}, [%5], #16 \n" "st1 {v21.4s}, [%6], #16 \n" "st1 {v22.4s}, [%7], #16 \n" "st1 {v23.4s}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4s}, [%9], #16 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v5.4s, v0.s[0] \n" "fmla v18.4s, 
v6.4s, v0.s[1] \n" "fmla v19.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v8.4s, v0.s[2] \n" "fmla v17.4s, v9.4s, v0.s[2] \n" "fmla v18.4s, v10.4s, v0.s[3] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.s}[0], [%1], #4 \n" "st1 {v16.s}[1], [%2], #4 \n" "st1 {v16.s}[2], [%3], #4 \n" "st1 {v16.s}[3], [%4], #4 \n" "st1 {v17.s}[0], [%5], #4 \n" "st1 {v17.s}[1], [%6], #4 \n" "st1 {v17.s}[2], [%7], #4 \n" "st1 {v17.s}[3], [%8], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); } } } remain_outch_start += nn_outch << 3; nn_outch = (outch - remain_outch_start) >> 2; #else // __aarch64__ nn_outch = outch >> 2; #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); #if __aarch64__ const Mat kernel01_tm = kernel_tm.channel(p / 8 + (p % 8) / 4); #else const Mat kernel01_tm = kernel_tm.channel(p / 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, 
v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n" "fmla v8.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v14.4s, v3.4s, v5.s[2] \n" "fmla v17.4s, v3.4s, v5.s[3] \n" "fmla v9.4s, v20.4s, v5.s[0] \n" "fmla v12.4s, v20.4s, v5.s[1] \n" "fmla v15.4s, v20.4s, v5.s[2] \n" "fmla v18.4s, v20.4s, v5.s[3] \n" "fmla v10.4s, v21.4s, v5.s[0] \n" "fmla v13.4s, v21.4s, v5.s[1] \n" "fmla v16.4s, v21.4s, v5.s[2] \n" "fmla v19.4s, v21.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n" "fmla v8.4s, v22.4s, v6.s[0] \n" "fmla v11.4s, v22.4s, v6.s[1] \n" "fmla v14.4s, v22.4s, v6.s[2] \n" "fmla v17.4s, v22.4s, v6.s[3] \n" "fmla v9.4s, v23.4s, v6.s[0] \n" "fmla v12.4s, v23.4s, v6.s[1] \n" "fmla v15.4s, v23.4s, v6.s[2] \n" "fmla v18.4s, v23.4s, v6.s[3] \n" "fmla v10.4s, v24.4s, v6.s[0] \n" "fmla v13.4s, v24.4s, v6.s[1] \n" "fmla v16.4s, v24.4s, v6.s[2] \n" "fmla v19.4s, v24.4s, v6.s[3] \n" "fmla v8.4s, v25.4s, v7.s[0] \n" "fmla 
v11.4s, v25.4s, v7.s[1] \n" "fmla v14.4s, v25.4s, v7.s[2] \n" "fmla v17.4s, v25.4s, v7.s[3] \n" "fmla v9.4s, v26.4s, v7.s[0] \n" "fmla v12.4s, v26.4s, v7.s[1] \n" "fmla v15.4s, v26.4s, v7.s[2] \n" "fmla v18.4s, v26.4s, v7.s[3] \n" "fmla v10.4s, v27.4s, v7.s[0] \n" "fmla v13.4s, v27.4s, v7.s[1] \n" "fmla v16.4s, v27.4s, v7.s[2] \n" "fmla v19.4s, v27.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __aarch64__ for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v10.4s, v0.4s, v4.s[1] \n" "fmla v12.4s, v0.4s, v4.s[2] \n" "fmla v14.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v11.4s, v1.4s, v4.s[1] \n" "fmla v13.4s, v1.4s, v4.s[2] \n" "fmla v15.4s, v1.4s, 
v4.s[3] \n" "fmla v8.4s, v2.4s, v5.s[0] \n" "fmla v10.4s, v2.4s, v5.s[1] \n" "fmla v12.4s, v2.4s, v5.s[2] \n" "fmla v14.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v13.4s, v3.4s, v5.s[2] \n" "fmla v15.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n" "fmla v8.4s, v16.4s, v6.s[0] \n" "fmla v10.4s, v16.4s, v6.s[1] \n" "fmla v12.4s, v16.4s, v6.s[2] \n" "fmla v14.4s, v16.4s, v6.s[3] \n" "fmla v9.4s, v17.4s, v6.s[0] \n" "fmla v11.4s, v17.4s, v6.s[1] \n" "fmla v13.4s, v17.4s, v6.s[2] \n" "fmla v15.4s, v17.4s, v6.s[3] \n" "fmla v8.4s, v18.4s, v7.s[0] \n" "fmla v10.4s, v18.4s, v7.s[1] \n" "fmla v12.4s, v18.4s, v7.s[2] \n" "fmla v14.4s, v18.4s, v7.s[3] \n" "fmla v9.4s, v19.4s, v7.s[0] \n" "fmla v11.4s, v19.4s, v7.s[1] \n" "fmla v13.4s, v19.4s, v7.s[2] \n" "fmla v15.4s, v19.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q10, q0, d8[1] \n" "vmla.f32 q12, q0, d9[0] \n" "vmla.f32 q14, q0, d9[1] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q11, q1, d8[1] \n" "vmla.f32 q13, q1, d9[0] \n" "vmla.f32 q15, q1, d9[1] \n" "vmla.f32 q8, q2, d10[0] \n" "vmla.f32 
q10, q2, d10[1] \n" "vmla.f32 q12, q2, d11[0] \n" "vmla.f32 q14, q2, d11[1] \n" "vmla.f32 q9, q3, d10[0] \n" "vmla.f32 q11, q3, d10[1] \n" "vmla.f32 q13, q3, d11[0] \n" "vmla.f32 q15, q3, d11[1] \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "vmla.f32 q8, q0, d12[0] \n" "vmla.f32 q10, q0, d12[1] \n" "vmla.f32 q12, q0, d13[0] \n" "vmla.f32 q14, q0, d13[1] \n" "vmla.f32 q9, q1, d12[0] \n" "vmla.f32 q11, q1, d12[1] \n" "vmla.f32 q13, q1, d13[0] \n" "vmla.f32 q15, q1, d13[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d14[0] \n" "vmla.f32 q10, q2, d14[1] \n" "vmla.f32 q12, q2, d15[0] \n" "vmla.f32 q14, q2, d15[1] \n" "vmla.f32 q9, q3, d14[0] \n" "vmla.f32 q11, q3, d14[1] \n" "vmla.f32 q13, q3, d15[0] \n" "vmla.f32 q15, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1]! \n" "vst1.f32 {d20-d23}, [%2]! \n" "vst1.f32 {d24-d27}, [%3]! \n" "vst1.f32 {d28-d31}, [%4]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v0.4s, v4.s[1] \n" "fmla v10.4s, v0.4s, v4.s[2] \n" "fmla v11.4s, v0.4s, v4.s[3] 
\n" "fmla v8.4s, v1.4s, v5.s[0] \n" "fmla v9.4s, v1.4s, v5.s[1] \n" "fmla v10.4s, v1.4s, v5.s[2] \n" "fmla v11.4s, v1.4s, v5.s[3] \n" "fmla v8.4s, v2.4s, v6.s[0] \n" "fmla v9.4s, v2.4s, v6.s[1] \n" "fmla v10.4s, v2.4s, v6.s[2] \n" "fmla v11.4s, v2.4s, v6.s[3] \n" "fmla v8.4s, v3.4s, v7.s[0] \n" "fmla v9.4s, v3.4s, v7.s[1] \n" "fmla v10.4s, v3.4s, v7.s[2] \n" "fmla v11.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s}, [%1], #16 \n" "st1 {v9.4s}, [%2], #16 \n" "st1 {v10.4s}, [%3], #16 \n" "st1 {v11.4s}, [%4], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q0, d8[1] \n" "vmla.f32 q10, q0, d9[0] \n" "vmla.f32 q11, q0, d9[1] \n" "vmla.f32 q8, q1, d10[0] \n" "vmla.f32 q9, q1, d10[1] \n" "vmla.f32 q10, q1, d11[0] \n" "vmla.f32 q11, q1, d11[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d12[0] \n" "vmla.f32 q9, q2, d12[1] \n" "vmla.f32 q10, q2, d13[0] \n" "vmla.f32 q11, q2, d13[1] \n" "vmla.f32 q8, q3, d14[0] \n" "vmla.f32 q9, q3, d14[1] \n" "vmla.f32 q10, q3, d15[0] \n" "vmla.f32 q11, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1]! \n" "vst1.f32 {d18-d19}, [%2]! \n" "vst1.f32 {d20-d21}, [%3]! \n" "vst1.f32 {d22-d23}, [%4]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.s}[0], [%1], #4 \n" "st1 {v8.s}[1], [%2], #4 \n" "st1 {v8.s}[2], [%3], #4 \n" "st1 {v8.s}[3], [%4], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! 
\n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q7, d1[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16[0]}, [%1]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vst1.f32 {d17[0]}, [%3]! \n" "vst1.f32 {d17[1]}, [%4]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v5.4s, v3.4s, v4.s[1] \n" "fmla v6.4s, v12.4s, v4.s[1] \n" "fmla v7.4s, v13.4s, v4.s[1] \n" "prfm 
pldl1keep, [%2, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n" "fmla v8.4s, v14.4s, v4.s[2] \n" "fmla v9.4s, v15.4s, v4.s[2] \n" "fmla v10.4s, v16.4s, v4.s[2] \n" "fmla v5.4s, v17.4s, v4.s[3] \n" "fmla v6.4s, v18.4s, v4.s[3] \n" "fmla v7.4s, v19.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v5.4s \n" "fadd v9.4s, v9.4s, v6.4s \n" "fadd v10.4s, v10.4s, v7.4s \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[1] \n" "fmla v11.4s, v3.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v8.4s, v12.4s, v4.s[2] \n" "fmla v9.4s, v13.4s, v4.s[2] \n" "fmla v10.4s, v14.4s, v4.s[3] \n" "fmla v11.4s, v15.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 
\n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3]! \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q10, q2, d8[1] \n" "vmla.f32 q11, q3, d8[1] \n" "pld [%2, #512] \n" "vldm %2!, {d24-d31} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q12, d9[0] \n" "vmla.f32 q9, q13, d9[0] \n" "vmla.f32 q10, q14, d9[1] \n" "vmla.f32 q11, q15, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vst1.f32 {d16-d19}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, 
#128] \n" "vld1.f32 {d8-d9}, [%3]! \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16-d17}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel0_tm.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); for (int q = 0; q < inch; q++) { float32x4_t _r0 = vld1q_f32(r0); float32x4_t _k0 = vld1q_f32(kptr); _sum0 = vmlaq_f32(_sum0, _r0, _k0); kptr += 4; r0 += 4; } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss2 = vpadd_f32(_ss, _ss); float sum0 = vget_lane_f32(_ss2, 0); #endif output0_tm[0] = sum0; output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator); } { conv3x3s1_winograd63_transform_output_bf16s_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
par_csr_matvec.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *x_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local 
); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_assert( idxstride>0 ); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size || num_rows != b_size) { ierr = 12; } if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) { ierr = 13; } hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); } else { hypre_assert( num_vectors > 1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef 
HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* x_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { /* hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); */ hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); } hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(x_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); hypre_SeqVectorSetDataOwner(x_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE); x_tmp_data = hypre_VectorData(x_tmp); /* x_buff_data */ x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { /* hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); */ hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); } x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { 
#ifdef HYPRE_USING_PERSISTENT_COMM x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); continue; #endif } x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). */ hypre_assert( idxstride == 1 ); //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv]; HYPRE_Complex *locl_data = x_local_data + jv * vecstride; /* if on device, no need to Sync: send_data is on device memory */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* pack send data on device */ HYPRE_THRUST_CALL( gather, hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), locl_data, send_data ); #elif defined(HYPRE_USING_DEVICE_OPENMP) /* pack send data on device */ HYPRE_Int i; HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts) for (i = start; i < end; i++) { send_data[i] = locl_data[device_send_map_elmts[i]]; } #else HYPRE_Int i; /* pack 
send data on host */ #if defined(HYPRE_USING_OPENMP) #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } #endif } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication starts */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv], HYPRE_MEMORY_DEVICE, &x_tmp_data[jv*num_cols_offd] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* computation offd part */ if (num_cols_offd) { hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local ); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; 
++jv )
      {
#if defined(HYPRE_USING_GPU)
         /* NOTE(review): the head of this function
            (hypre_ParCSRMatrixMatvecOutOfPlace) lies outside this chunk;
            the lines down to the first closing brace are its cleanup tail.
            jv == 0 aliases the persistent buffer owned by comm_pkg
            (hypre_ParCSRCommPkgBufData), so it must not be freed here. */
         if (jv == 0)
         {
            continue;
         }
#endif
         hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE);
      }
      hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
   }

#if defined(HYPRE_USING_GPU)
   /* restore the caller's sync policy and synchronize the compute stream */
   hypre_SetSyncCudaCompute(sync_stream);
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvec
 *
 * Performs y <- alpha * A * x + beta * y by delegating to the out-of-place
 * kernel with b == y.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixMatvec( HYPRE_Complex       alpha,
                          hypre_ParCSRMatrix *A,
                          hypre_ParVector    *x,
                          HYPRE_Complex       beta,
                          hypre_ParVector    *y )
{
   return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvecT
 *
 * Performs y <- alpha * A^T * x + beta * y
 *
 * The halo exchange is the reverse of the forward matvec: the off-diagonal
 * product is computed into y_tmp first, sent to the owning processes, and
 * scatter-added into y_local after the overlapped diagonal product.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixMatvecT( HYPRE_Complex       alpha,
                           hypre_ParCSRMatrix *A,
                           hypre_ParVector    *x,
                           HYPRE_Complex       beta,
                           hypre_ParVector    *y )
{
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag  = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd  = hypre_ParCSRMatrixOffd(A);
   /* explicit transposes are optional; used instead of the T-kernels below */
   hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A);
   hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   hypre_Vector *y_tmp;

   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);

   HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, jv;

   HYPRE_Int vecstride = hypre_VectorVectorStride(y_local);
   HYPRE_Int idxstride = hypre_VectorIndexStride(y_local);

   HYPRE_Complex *y_tmp_data, **y_buf_data;
   HYPRE_Complex *y_local_data = hypre_VectorData(y_local);

#if defined(HYPRE_USING_GPU)
   /* defer stream sync until the end of this routine */
   HYPRE_Int sync_stream;
   hypre_GetSyncCudaCompute(&sync_stream);
   hypre_SetSyncCudaCompute(0);
#endif

   /*---------------------------------------------------------------------
    * Check for size compatibility.  MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   if (num_rows != x_size)
   {
      ierr = 1;
   }

   if (num_cols != y_size)
   {
      ierr = 2;
   }

   if (num_rows != x_size && num_cols != y_size)
   {
      ierr = 3;
   }

   hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors );
   hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );

   if ( num_vectors == 1 )
   {
      y_tmp = hypre_SeqVectorCreate(num_cols_offd);
   }
   else
   {
      hypre_assert( num_vectors > 1 );
      y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors);
   }

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg,
                                                                  hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
   hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
   use_persistent_comm = num_vectors == 1;
   // JSP TODO: we can use persistent communication for multi-vectors,
   // but then we need different communication handles for different
   // num_vectors.
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      /* job 2 == transpose-direction exchange */
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg);
#endif
   }
   else
   {
      comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
   }

   /* y_tmp */
#if defined(HYPRE_USING_GPU)
   /* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */
   if (num_vectors == 1)
   {
      if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
      {
         //hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE);
         hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd,
                                                              hypre_MEMORY_DEVICE);
      }
      hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
      hypre_SeqVectorSetDataOwner(y_tmp, 0);
   }
#else
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      /* in the transpose direction y_tmp is sent, so it borrows the
         persistent handle's send buffer */
      hypre_VectorData(y_tmp) = (HYPRE_Complex *)
         hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
      hypre_SeqVectorSetDataOwner(y_tmp, 0);
#endif
   }
#endif
   hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE);
   y_tmp_data = hypre_VectorData(y_tmp);

   /* y_buf_data */
   y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);
   for (jv = 0; jv < num_vectors; ++jv)
   {
#if defined(HYPRE_USING_GPU)
      if (jv == 0)
      {
         if (!hypre_ParCSRCommPkgBufData(comm_pkg))
         {
            /* hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
                                                                   hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                   HYPRE_MEMORY_DEVICE); */
            hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex,
                                                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                 hypre_MEMORY_DEVICE);
         }
         y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
         continue;
      }
#endif
      if (use_persistent_comm)
      {
#ifdef HYPRE_USING_PERSISTENT_COMM
         y_buf_data[0] = (HYPRE_Complex *)
            hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
         continue;
#endif
      }
      y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
                                    hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                    HYPRE_MEMORY_DEVICE);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   /* off-diagonal contribution, computed into y_tmp before the exchange */
   if (num_cols_offd)
   {
      if (offdT)
      {
         // offdT is optional. Used only if it's present
         hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp);
      }
      else
      {
         hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp);
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication starts */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         /* this is where we assume multivectors are 'column' storage */
         comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg,
                                                            HYPRE_MEMORY_DEVICE, &y_tmp_data[jv*num_cols_offd],
                                                            HYPRE_MEMORY_DEVICE, y_buf_data[jv] );
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* overlapped local computation */
   if (diagT)
   {
      // diagT is optional. Used only if it's present.
      hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local);
   }
   else
   {
      hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication ends */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
         comm_handle[jv] = NULL;
      }
      hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   /* The assert is because the following loop only works for 'column'
      storage of a multivector. This needs to be fixed to work more
      generally, at least for 'row' storage. This in turn, means either
      change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or,
      less dangerously, put a stride in the logic of CommHandleCreate
      (stride either from a new arg or a new variable inside CommPkg). Or
      put the num_vector iteration inside CommHandleCreate (perhaps a new
      multivector variant of it). */
   hypre_assert( idxstride == 1 );

   /* send_map_elmts on device */
   hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);

   /* scatter-add the received halo contributions into y_local */
   for (jv = 0; jv < num_vectors; ++jv)
   {
      HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv];
      HYPRE_Complex *locl_data = y_local_data + jv * vecstride;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      /* unpack recv data on device */
      if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg))
      {
         hypre_ParCSRCommPkgWorkSpace(comm_pkg) =
            hypre_TAlloc( char,
                          (2*sizeof(HYPRE_Int)+sizeof(HYPRE_Real)) *
                          hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                          HYPRE_MEMORY_DEVICE );
      }
      hypreDevice_GenScatterAdd(locl_data,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
                                recv_data,
                                hypre_ParCSRCommPkgWorkSpace(comm_pkg));
#elif defined(HYPRE_USING_DEVICE_OPENMP)
      HYPRE_Int i, j;
      /* unpack recv data on device */
      for (i = 0; i < num_sends; i++)
      {
         HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
         HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
         #pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts)
         for (j = start; j < end; j++)
         {
            locl_data[device_send_map_elmts[j]] += recv_data[j];
         }
      }
#else
      HYPRE_Int i;
      /* unpack recv data on host, TODO OMP? */
      for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
           i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
           i ++)
      {
         locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)] += recv_data[i];
      }
#endif
   }

   hypre_SeqVectorDestroy(y_tmp);
   y_tmp = NULL;

   if (!use_persistent_comm)
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
#if defined(HYPRE_USING_GPU)
         /* jv == 0 aliases the comm_pkg-owned persistent buffer; not freed here */
         if (jv == 0)
         {
            continue;
         }
#endif
         hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE);
      }
      hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST);
   }

#if defined(HYPRE_USING_GPU)
   hypre_SetSyncCudaCompute(sync_stream);
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvec_FF
 *
 * Matvec restricted to entries whose CF_marker equals fpt ("F-points"),
 * used by AMG interpolation.  Host-only, blocking-style version: the
 * x halo exchange overlaps the diagonal product; a second (blocking)
 * exchange ships CF_marker to build CF_marker_offd before the off-diagonal
 * product.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex       alpha,
                             hypre_ParCSRMatrix *A,
                             hypre_ParVector    *x,
                             HYPRE_Complex       beta,
                             hypre_ParVector    *y,
                             HYPRE_Int          *CF_marker,
                             HYPRE_Int           fpt )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);

   hypre_Vector *x_tmp;
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, i, j, index, start, num_procs;
   HYPRE_Int *int_buf_data = NULL;
   HYPRE_Int *CF_marker_offd = NULL;

   HYPRE_Complex *x_tmp_data = NULL;
   HYPRE_Complex *x_buf_data = NULL;
   HYPRE_Complex *x_local_data = hypre_VectorData(x_local);

   /*---------------------------------------------------------------------
    * Check for size compatibility.  ParMatvec returns ierr = 11 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 12 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 13 if both are true.
    *
    * Because temporary vectors are often used in ParMatvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   hypre_MPI_Comm_size(comm,&num_procs);

   if (num_cols != x_size)
      ierr = 11;

   if (num_rows != y_size)
      ierr = 12;

   if (num_cols != x_size && num_rows != y_size)
      ierr = 13;

   if (num_procs > 1)
   {
      if (num_cols_offd)
      {
         x_tmp = hypre_SeqVectorCreate( num_cols_offd );
         hypre_SeqVectorInitialize(x_tmp);
         x_tmp_data = hypre_VectorData(x_tmp);
      }

      /*---------------------------------------------------------------------
       * If there exists no CommPkg for A, a CommPkg is generated using
       * equally load balanced partitionings
       *--------------------------------------------------------------------*/
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(A);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      }

      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (num_sends)
         x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart
                                    (comm_pkg, num_sends), HYPRE_MEMORY_HOST);

      /* pack x entries referenced by remote processes */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data );
   }

   /* overlapped diagonal product; both row and column markers are the
      local CF_marker */
   hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker,
                             CF_marker, fpt);

   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;

      if (num_sends)
         int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart
                                      (comm_pkg, num_sends), HYPRE_MEMORY_HOST);
      if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);

      /* ship CF_marker for the halo columns (job 11 == integer exchange) */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd );
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;

      if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker,
                                                   CF_marker_offd, fpt);

      hypre_SeqVectorDestroy(x_tmp);
      x_tmp = NULL;
      hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   }

   return ierr;
}
parallel.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <cstdint>     // int64_t (was relied on transitively)
#include <functional>  // std::function used by ThreadHandler (was missing)

#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#include "lite/backends/x86/mklml.h"
#endif

namespace paddle {
namespace lite {
namespace x86 {

// Set the number of threads used by MKL and OpenMP (clamped to >= 1).
// No-op unless built with PADDLE_WITH_MKLML.
static void SetNumThreads(int num_threads) {
#ifdef PADDLE_WITH_MKLML
  int real_num_threads = (std::max)(num_threads, 1);
#ifdef LITE_WITH_STATIC_MKL
  MKL_Set_Num_Threads(real_num_threads);
#else
  x86::MKL_Set_Num_Threads(real_num_threads);
#endif
  omp_set_num_threads(real_num_threads);
#endif
}

// Maximum number of worker threads available (always >= 1).  Reports 1 when
// called from inside an OpenMP parallel region, since nested parallelism is
// not supported.
static inline int64_t GetMaxThreads() {
  int64_t num_threads = 1;
#ifdef PADDLE_WITH_MKLML
  // Do not support nested omp parallelism.
  num_threads = omp_in_parallel() ? 1 : omp_get_max_threads();
#endif
  // Compare in int64_t: the previous std::max<int>(num_threads, 1L)
  // narrowed the thread count to int before comparing.
  return (std::max)(num_threads, static_cast<int64_t>(1));
}

using ThreadHandler = std::function<void(const int64_t begin, const int64_t end)>;

// Invoke f over [begin, end): split into contiguous per-thread chunks when an
// OpenMP team is worthwhile, otherwise run f(begin, end) on the calling thread.
static inline void RunParallelFor(const int64_t begin,
                                  const int64_t end,
                                  const ThreadHandler& f) {
  if (begin >= end) {
    return;
  }

#ifdef PADDLE_WITH_MKLML
  // Never launch more threads than there are iterations.
  int64_t num_threads = (std::min)(GetMaxThreads(), end - begin);
  if (num_threads > 1) {
#pragma omp parallel num_threads(num_threads)
    {
      int64_t tid = omp_get_thread_num();
      int64_t chunk_size = (end - begin + num_threads - 1) / num_threads;
      int64_t begin_tid = begin + tid * chunk_size;
      // Clamp the last chunk so no thread runs past `end`.
      f(begin_tid, (std::min)(end, chunk_size + begin_tid));
    }
    return;
  }
#endif
  f(begin, end);
}

}  // namespace x86
}  // namespace lite
}  // namespace paddle
GB_subassign_05.c
//------------------------------------------------------------------------------
// GB_subassign_05: C(I,J)<M> = scalar ; no S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 05: C(I,J)<M> = scalar ; no S

// M:           present
// Mask_comp:   false
// C_replace:   false
// accum:       NULL
// A:           scalar
// S:           none

// C: not bitmap
// M: any sparsity

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_05
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    // sets up the task list, waits out any pending work in C and M, and
    // unpacks C, M, and the scalar via the GB_GET_* macros
    GB_EMPTY_TASKLIST ;
    GB_MATRIX_WAIT_IF_JUMBLED (C) ;
    GB_MATRIX_WAIT_IF_JUMBLED (M) ;

    GB_GET_C ;      // C must not be bitmap
    int64_t zorig = C->nzombies ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_MASK ;
    GB_GET_SCALAR ;
    // no accumulator for Method 05 (plain assignment semantics)
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 05: C(I,J)<M> = scalar ; no S
    //--------------------------------------------------------------------------

    // Time: Close to Optimal:  the method must iterate over all entries in M,
    // so the time is Omega(nnz(M)).  For each entry M(i,j)=1, the
    // corresponding entry in C must be found and updated (inserted or
    // modified).  This method does this with a binary search of C(:,jC) or a
    // direct lookup if C(:,jC) is dense.  The time is thus O(nnz(M)*log(n)) in
    // the worst case, usually less than that since C(:,jC) often has O(1)
    // entries.  An additional time of O(|J|*log(Cnvec)) is added if C is
    // hypersparse.  There is no equivalent method that computes
    // C(I,J)<M>=scalar using the matrix S.

    // Method 05 and Method 07 are very similar.  Also compare with Method 06n.

    //--------------------------------------------------------------------------
    // Parallel: slice M into coarse/fine tasks (Method 05, 06n, 07)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_ONE_SLICE (M) ;    // M cannot be jumbled

    //--------------------------------------------------------------------------
    // phase 1: undelete zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of M
            //------------------------------------------------------------------

            int64_t j = GBH (Mh, k) ;
            GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ;
            int64_t mjnz = pM_end - pM ;
            if (mjnz == 0) continue ;

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;
            int64_t cjnz = pC_end - pC_start ;
            bool cjdense = (cjnz == Cvlen) ;

            //------------------------------------------------------------------
            // C(I,jC)<M(:,j)> = scalar ; no S
            //------------------------------------------------------------------

            if (cjdense)
            {

                //--------------------------------------------------------------
                // C(:,jC) is dense so the binary search of C is not needed
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;
                        GB_iC_DENSE_LOOKUP ;

                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =A ): copy A into C, no accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_noaccum_C_A_1_scalar ;
                    }
                }
            }
            else
            {

                //--------------------------------------------------------------
                // C(:,jC) is sparse; use binary search for C
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;

                        // find C(iC,jC) in C(:,jC)
                        GB_iC_BINARY_SEARCH ;
                        if (cij_found)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): copy A into C, no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // entry not present: counted here, inserted as a
                            // pending tuple in phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;
    // reload the zombie count: phase 1 may have brought zombies back to life
    zorig = C->nzombies ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of M
            //------------------------------------------------------------------

            int64_t j = GBH (Mh, k) ;
            GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ;
            int64_t mjnz = pM_end - pM ;
            if (mjnz == 0) continue ;

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;
            bool cjdense = ((pC_end - pC_start) == Cvlen) ;

            //------------------------------------------------------------------
            // C(I,jC)<M(:,j)> = scalar ; no S
            //------------------------------------------------------------------

            if (!cjdense)
            {

                //--------------------------------------------------------------
                // C(:,jC) is sparse; use binary search for C
                //--------------------------------------------------------------

                // dense vectors were fully handled in phase 1; only sparse
                // vectors can contain missing entries that become tuples
                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;

                        // find C(iC,jC) in C(:,jC)
                        GB_iC_BINARY_SEARCH ;
                        if (!cij_found)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
satisfiability.c
/**
 * @file satisfiability.c
 * @author Austin Gill (atgill@protonmail.com)
 * @brief Implementation of satisfiability functions. Yay.
 */
#include "satisfiability.h"

/**
 * @brief Evaluate the first hard-coded CNF circuit on one 16-bit input.
 *
 * Unpacks @p z into 16 booleans via extract_bits() and evaluates the
 * clause conjunction. Satisfying assignments are printed (tagged with the
 * calling thread id) and the function returns true.
 *
 * @param tid Thread id used only to tag the printed output line.
 * @param z   16-bit candidate assignment, one bit per variable.
 * @return true iff @p z satisfies the circuit.
 */
bool circuit_one( const int32_t tid, const uint16_t z )
{
    bool bits[16] = {0};
    extract_bits( bits, z );

    if( ( bits[0] || bits[1] ) && ( !bits[1] || !bits[3] ) && ( bits[2] || bits[3] ) &&
        ( !bits[3] || !bits[4] ) && ( bits[4] || !bits[5] ) && ( bits[5] || !bits[6] ) &&
        ( bits[5] || bits[6] ) && ( bits[6] || !bits[15] ) && ( bits[7] || !bits[8] ) &&
        ( !bits[7] || !bits[13] ) && ( bits[8] || bits[9] ) && ( bits[8] || !bits[9] ) &&
        ( !bits[9] || !bits[10] ) && ( bits[9] || bits[11] ) && ( bits[10] || bits[11] ) &&
        ( bits[12] || bits[13] ) && ( bits[13] || !bits[14] ) && ( bits[14] || bits[15] ) )
    {
        printf( "%d) %d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d\n",
                tid,
                bits[0], bits[1], bits[2], bits[3],
                bits[4], bits[5], bits[6], bits[7],
                bits[8], bits[9], bits[10], bits[11],
                bits[12], bits[13], bits[14], bits[15] );
        fflush( stdout );
        return true;
    }
    return false;
}

/**
 * @brief Evaluate the second hard-coded CNF circuit on one 16-bit input.
 *
 * Same contract as circuit_one(), with a different clause set.
 *
 * @param tid Thread id used only to tag the printed output line.
 * @param z   16-bit candidate assignment, one bit per variable.
 * @return true iff @p z satisfies the circuit.
 */
bool circuit_two( const int32_t tid, const uint16_t z )
{
    bool bits[16] = {0};
    extract_bits( bits, z );

    if( ( !bits[14] || bits[11] ) && ( !bits[12] || !bits[11] ) && ( bits[2] || bits[8] ) &&
        ( bits[2] || !bits[0] ) && ( bits[5] || bits[6] ) && ( !bits[10] || bits[13] ) &&
        ( !bits[11] || !bits[13] ) && ( !bits[8] || bits[2] ) && ( !bits[15] || bits[13] ) &&
        ( bits[3] || !bits[2] ) && ( !bits[8] || bits[15] ) && ( bits[6] || bits[3] ) &&
        ( !bits[0] || bits[15] ) && ( bits[11] || bits[3] ) && ( bits[3] || bits[11] ) &&
        ( !bits[13] || bits[11] ) && ( !bits[9] || !bits[8] ) && ( bits[12] || !bits[8] ) )
    {
        printf( "%d) %d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d\n",
                tid,
                bits[0], bits[1], bits[2], bits[3],
                bits[4], bits[5], bits[6], bits[7],
                bits[8], bits[9], bits[10], bits[11],
                bits[12], bits[13], bits[14], bits[15] );
        fflush( stdout );
        return true;
    }
    return false;
}

/**
 * @brief Exhaustively test a circuit over all 2^16 inputs in parallel.
 *
 * Fixes relative to the previous revision:
 *  - every parallel loop now carries a reduction(+:sum) clause; the old
 *    code incremented the shared counter from multiple threads without
 *    synchronization (a data race, undefined behavior under OpenMP);
 *  - loop counters are uint32_t with `input <= USHRT_MAX`, so input 65535
 *    is tested too (the old `uint16_t input < USHRT_MAX` skipped it; a
 *    uint16_t counter cannot use `<=` without wrapping forever).
 *
 * @param circuit_fp Circuit predicate to evaluate for every input.
 */
void check_circuit( bool ( *circuit_fp )( const int32_t, const uint16_t ) )
{
#ifdef SCHEDULE_COMPARISON
    double begin = omp_get_wtime();
    size_t sum = 0;

    /* reduction avoids a race on sum; each thread accumulates privately. */
    #pragma omp parallel for num_threads( omp_get_num_procs() ) reduction( + : sum )
    for( uint32_t input = 0; input <= USHRT_MAX; ++input )
    {
        if( circuit_fp( omp_get_thread_num(), (uint16_t)input ) )
        {
            ++sum;
        }
    }
    double end = omp_get_wtime();

    printf( "\n" );
    printf( "=============================================================================\n" );
    printf( "found %zu inputs satisfying the circuit in %4f seconds with default schedule.\n", sum, end - begin );
    printf( "=============================================================================\n" );
    printf( "\n" );

    sum = 0;
    begin = omp_get_wtime();
    #pragma omp parallel for num_threads( omp_get_num_procs() ) schedule( static, 1 ) reduction( + : sum )
    for( uint32_t input = 0; input <= USHRT_MAX; ++input )
    {
        if( circuit_fp( omp_get_thread_num(), (uint16_t)input ) )
        {
            ++sum;
        }
    }
    end = omp_get_wtime();

    printf( "\n" );
    printf( "=============================================================================\n" );
    printf( "found %zu inputs satisfying the circuit in %4f seconds with static schedule.\n", sum, end - begin );
    printf( "=============================================================================\n" );
    printf( "\n" );

    sum = 0;
    begin = omp_get_wtime();
    #pragma omp parallel for num_threads( omp_get_num_procs() ) schedule( dynamic, 1 ) reduction( + : sum )
    for( uint32_t input = 0; input <= USHRT_MAX; ++input )
    {
        if( circuit_fp( omp_get_thread_num(), (uint16_t)input ) )
        {
            ++sum;
        }
    }
    end = omp_get_wtime();

    printf( "\n" );
    printf( "=============================================================================\n" );
    printf( "found %zu inputs satisfying the circuit in %4f seconds with dynamic schedule.\n", sum, end - begin );
    printf( "=============================================================================\n" );
#else
    size_t sum = 0;
    /* reduction avoids a race on sum; each thread accumulates privately. */
    #pragma omp parallel for num_threads( omp_get_num_procs() ) reduction( + : sum )
    for( uint32_t input = 0; input <= USHRT_MAX; ++input )
    {
        if( circuit_fp( omp_get_thread_num(), (uint16_t)input ) )
        {
            ++sum;
        }
    }
    printf( "\n" );
    printf( "=======================================\n" );
    printf( "%zu inputs satisfied the given circuit.\n", sum );
    printf( "=======================================\n" );
#endif // SCHEDULE_COMPARISON
}
par_gsmg.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Geometrically smooth interpolation multigrid
 *
 *****************************************************************************/

#include <stdio.h>
#include <math.h>

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "_hypre_lapack.h"

#ifndef ABS
#define ABS(x) ((x)>0 ? (x) : -(x))
#endif
#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

/* Euclidean norm of x[0..n-1]. */
static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x)
{
   HYPRE_Real temp = 0.;
   HYPRE_Int i;

   for (i = 0; i < n; i++)
   {
      temp = temp + x[i] * x[i];
   }
   return sqrt(temp);
}

/* Scale x[0..n-1] in place by a. */
static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x)
{
   HYPRE_Int i;

   for (i = 0; i < n; i++)
   {
      x[i] = a * x[i];
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFillSmooth
 * - fill in smooth matrix
 * - this function will scale the smooth vectors
 *
 * Fills each off-diagonal entry S(i,ii) with the reciprocal of the summed
 * absolute differences |p[i] - p[ii]| over all sample vectors (so entries
 * connecting points with similar smooth-vector values become large).
 * Off-processor sample values are exchanged through the comm package of A.
 * Entries between unlike functions, or matching explicit zeros of A, are
 * set to 0.  samples is an nsamples x n array, modified in place by the
 * normalization at the top.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples,
                             hypre_ParCSRMatrix *S,
                             hypre_ParCSRMatrix *A,
                             HYPRE_Int num_functions,
                             HYPRE_Int *dof_func)
{
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j, k, ii, index, start;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *dof_func_offd;
   HYPRE_Int *int_buf_data;
   HYPRE_Real temp;
   HYPRE_Real *p;
   HYPRE_Real *p_offd;
   HYPRE_Real *p_ptr;
   HYPRE_Real *buf_data;
   HYPRE_Real nm;
#if 0
   HYPRE_Real mx = 0., my = 1.e+10;
#endif

   /* normalize each sample vector and divide by number of samples */
   for (k = 0; k < nsamples; k++)
   {
      nm = mydnrm2(n, samples + k * n);
      nm = 1. / nm / nsamples;
      mydscal(n, nm, samples + k * n);
   }

   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   buf_data = hypre_CTAlloc(HYPRE_Real,
                            hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                            HYPRE_MEMORY_HOST);
   /* p_offd holds the off-processor sample values, nsamples x num_cols_offd */
   p_offd = hypre_CTAlloc(HYPRE_Real, nsamples * num_cols_offd, HYPRE_MEMORY_HOST);
   p_ptr = p_offd;  /* keep the base pointer; p_offd itself is advanced below */

   p = samples;
   /* exchange each sample vector's boundary values with neighbor processes */
   for (k = 0; k < nsamples; k++)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            buf_data[index++] = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, p_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);

      p = p + n;
      p_offd = p_offd + num_cols_offd;
   }

   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);

   /* also fetch off-processor dof_func when there are multiple functions */
   if (num_functions > 1)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                   hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                   HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   for (i = 0; i < n; i++)
   {
      /* diag part: skip the first entry of the row (the diagonal) */
      for (j = S_diag_i[i] + 1; j < S_diag_i[i + 1]; j++)
      {
         ii = S_diag_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func[ii])
         {
            S_diag_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_diag_data[j] == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         for (k = 0; k < nsamples; k++)
         {
            temp = temp + ABS(p[i] - p[ii]);
            p = p + n;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 1. / temp; /* reciprocal */
#if 0
         my = hypre_min(my, temp);
         mx = hypre_max(mx, temp);
#endif
         S_diag_data[j] = temp;
      }

      /* offd part: same computation against the received sample values */
      for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
      {
         ii = S_offd_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func_offd[ii])
         {
            S_offd_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_offd_data[j] == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         p_offd = p_ptr;
         for (k = 0; k < nsamples; k++)
         {
            temp = temp + ABS(p[i] - p_offd[ii]);
            p = p + n;
            p_offd = p_offd + num_cols_offd;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 1. / temp; /* reciprocal */
#if 0
         my = hypre_min(my, temp);
         mx = hypre_max(mx, temp);
#endif
         S_offd_data[j] = temp;
      }
   }
#if 0
   hypre_printf("MIN, MAX: %f %f\n", my, mx);
#endif

   hypre_TFree(p_ptr, HYPRE_MEMORY_HOST);
   if (num_functions > 1)
   {
      hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   }

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixChooseThresh
 *
 * Returns the global minimum (over all rows on all processes) of each
 * row's maximum off-diagonal entry of S; rows whose maximum is 0 are
 * ignored.  Used to pick a thresholding level for S.
 *--------------------------------------------------------------------------*/

HYPRE_Real
hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j;
   HYPRE_Real mx, minimax = 1.e+10;
   HYPRE_Real minmin;

   for (i = 0; i < n; i++)
   {
      mx = 0.;
      for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++)
      {
         mx = hypre_max(mx, S_diag_data[j]);
      }
      for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++)
      {
         mx = hypre_max(mx, S_offd_data[j]);
      }

      if (mx != 0.)
      {
         minimax = hypre_min(minimax, mx);
      }
   }

   /* reduce the local minimax over all ranks */
   hypre_MPI_Allreduce(&minimax, &minmin, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm);

   return minmin;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixThreshold
 *
 * Compresses A in place: keeps only entries >= thresh, rebuilding the
 * diag and offd CSR arrays and freeing the old ones.  Note the comparison
 * is signed (not on absolute value), consistent with S holding
 * nonnegative reciprocal-difference values from FillSmooth.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_nonzeros_diag = A_diag_i[n];
   HYPRE_Int num_nonzeros_offd = A_offd_i[n];
   HYPRE_Int *S_diag_i;
   HYPRE_Int *S_diag_j;
   HYPRE_Real *S_diag_data;
   HYPRE_Int *S_offd_i;
   HYPRE_Int *S_offd_j;
   HYPRE_Real *S_offd_data;
   HYPRE_Int count, i, jS, jA;

   /* first count the number of nonzeros we will need */
   count = 0;
   for (i = 0; i < num_nonzeros_diag; i++)
      if (A_diag_data[i] >= thresh) { count++; }

   /* allocate vectors */
   S_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST);
   S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);

   jS = 0;
   for (i = 0; i < n; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++)
      {
         if (A_diag_data[jA] >= thresh)
         {
            S_diag_data[jS] = A_diag_data[jA];
            S_diag_j[jS] = A_diag_j[jA];
            jS++;
         }
      }
   }
   S_diag_i[n] = jS;
   hypre_CSRMatrixNumNonzeros(A_diag) = jS;

   /* free the vectors we don't need */
   hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST);
   hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST);

   /* assign the new vectors */
   hypre_CSRMatrixI(A_diag) = S_diag_i;
   hypre_CSRMatrixJ(A_diag) = S_diag_j;
   hypre_CSRMatrixData(A_diag) = S_diag_data;

   /*
    * Offd part
    */

   /* first count the number of nonzeros we will need */
   count = 0;
   for (i = 0; i < num_nonzeros_offd; i++)
      if (A_offd_data[i] >= thresh) { count++; }

   /* allocate vectors */
   S_offd_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST);
   S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);

   jS = 0;
   for (i = 0; i < n; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
      {
         if (A_offd_data[jA] >= thresh)
         {
            S_offd_data[jS] = A_offd_data[jA];
            S_offd_j[jS] = A_offd_j[jA];
            jS++;
         }
      }
   }
   S_offd_i[n] = jS;
   hypre_CSRMatrixNumNonzeros(A_offd) = jS;

   /* free the vectors we don't need */
   hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST);
   hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST);

   /* assign the new vectors */
   hypre_CSRMatrixI(A_offd) = S_offd_i;
   hypre_CSRMatrixJ(A_offd) = S_offd_j;
   hypre_CSRMatrixData(A_offd) = S_offd_data;

   return 0;
}

/*--------------------------------------------------------------------------
 * CreateSmoothVecs
 * - smoother depends on the level being used
 *
 * Generates nsamples smooth vectors by relaxing random initial vectors
 * against the zero right-hand side (num_sweeps sweeps each).  The local
 * parts are packed contiguously into a newly allocated array returned via
 * SmoothVecs_p; the caller owns and must free it.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSmoothVecs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Int num_sweeps,
                                HYPRE_Int level,
                                HYPRE_Real **SmoothVecs_p)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_ParVector *Zero;
   hypre_ParVector *Temp;
   hypre_ParVector *U;
   hypre_ParVector *Qtemp = NULL;
   HYPRE_Int i;
   HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int sample;
   HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data);
   HYPRE_Int ret;
   HYPRE_Real *datax, *bp, *p;
   HYPRE_Int rlx_type;
   HYPRE_Int smooth_type;
   HYPRE_Int smooth_option = 0;
   HYPRE_Int smooth_num_levels;
   HYPRE_Solver *smoother;
   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
   HYPRE_Int num_threads;

   num_threads = hypre_NumThreads();

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (debug_flag >= 1)
      hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps, nsamples);

   smooth_type = hypre_ParAMGDataSmoothType(amg_data);
   smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data);
   /* when a complex smoother covers this level, use it (and its sweep count)
      instead of plain relaxation; note num_sweeps is overridden here */
   if (smooth_num_levels > level)
   {
      smooth_option = smooth_type;
      smoother = hypre_ParAMGDataSmoother(amg_data);
      num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data);
   }

   rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0];
   /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */
   /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */

   /* generate par vectors */
   Zero = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorInitialize(Zero);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero));
   for (i = 0; i < n_local; i++) { datax[i] = 0.; }

   Temp = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorInitialize(Temp);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp));
   for (i = 0; i < n_local; i++) { datax[i] = 0.; }

   U = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorInitialize(U);
   /* datax now aliases U's local data for the sampling loop below */
   datax = hypre_VectorData(hypre_ParVectorLocalVector(U));

   if (num_threads > 1)
   {
      Qtemp = hypre_ParVectorCreate(comm, n, starts);
      hypre_ParVectorInitialize(Qtemp);
   }

   /* allocate space for the vectors */
   bp = hypre_CTAlloc(HYPRE_Real, nsamples * n_local, HYPRE_MEMORY_HOST);
   p = bp;

   /* generate random vectors */
   for (sample = 0; sample < nsamples; sample++)
   {
      for (i = 0; i < n_local; i++) { datax[i] = hypre_Rand() - .5; }

      for (i = 0; i < num_sweeps; i++)
      {
         if (smooth_option == 6)
         {
            HYPRE_SchwarzSolve(smoother[level],
                               (HYPRE_ParCSRMatrix) A,
                               (HYPRE_ParVector) Zero,
                               (HYPRE_ParVector) U);
         }
         else
         {
            ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/,
                                       rlx_type, 0 /*rel pts*/, 1.0 /*weight*/,
                                       1.0 /*omega*/, NULL, U, Temp, Qtemp);
            hypre_assert(ret == 0);
         }
      }

      /* copy out the solution */
      for (i = 0; i < n_local; i++) { *p++ = datax[i]; }
   }

   hypre_ParVectorDestroy(Zero);
   hypre_ParVectorDestroy(Temp);
   hypre_ParVectorDestroy(U);
   if (num_threads > 1) { hypre_ParVectorDestroy(Qtemp); }

   *SmoothVecs_p = bp;

   return 0;
}

/*--------------------------------------------------------------------------
 * CreateSmoothDirs replaces CreateS in AMG
 * - smoother depends on the level being used
 * - in this version, CreateSmoothVecs must be called prior to this function
 *
 * Builds the strength matrix S from the smooth vectors: clones A's
 * pattern, fills it via FillSmooth, then thresholds at
 * thresh * (global minimax entry).  Caller owns *S_ptr.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSmoothDirs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Real *SmoothVecs,
                                HYPRE_Real thresh,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                hypre_ParCSRMatrix **S_ptr)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
   hypre_ParCSRMatrix *S;
   HYPRE_Real minimax;
   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);

   S = hypre_ParCSRMatrixClone(A, 0);

   /* Traverse S and fill in differences */
   hypre_ParCSRMatrixFillSmooth(
      hypre_ParAMGDataNumSamples(amg_data), SmoothVecs,
      S, A, num_functions, dof_func);

   minimax = hypre_ParCSRMatrixChooseThresh(S);
   if (debug_flag >= 1)
   {
      hypre_printf("Minimax chosen: %f\n", minimax);
   }

   /* Threshold and compress */
   hypre_ParCSRMatrixThreshold(S, thresh * minimax);

   *S_ptr = S;

   return 0;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGNormalizeVecs
 *
 * Normalize the smooth vectors and also make the first vector the constant
 * vector
 *
 * inputs:
 * n = length of smooth vectors
 * num = number of smooth vectors
 * V = smooth vectors (array of length n*num), also an output
 *
 * output:
 * V = adjusted smooth vectors
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V)
{
   HYPRE_Int i, j;
   HYPRE_Real nrm;

   /* change first vector to the constant vector */
   for (i = 0; i < n; i++)
   {
      V[i] = 1.0;
   }

   /* normalize every vector (including the constant one) to unit 2-norm */
   for (j = 0; j < num; j++)
   {
      nrm = mydnrm2(n, &V[j * n]);
      mydscal(n, 1. / nrm, &V[j * n]);
   }

   return 0;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGFitVectors
 *
 * Construct interpolation weights based on fitting smooth vectors
 *
 * inputs:
 * ip = row number of row in P being processed (0-based)
 * n = length of smooth vectors
 * num = number of smooth vectors
 * V = smooth vectors (array of length n*num), also an output
 * nc = number of coarse grid points
 * ind = indices of coarse grid points (0-based)
 *
 * output:
 * val = interpolation weights for the coarse grid points
 * V = smooth vectors; first one has been changed to constant vector;
 * vectors have also been normalized; this is also an input
 *
 * Solves the least-squares problem a * val = b with LAPACK dgels, where
 * a is num x nc (smooth-vector values at the coarse points) and b holds
 * the smooth-vector values at row ip.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num,
                                    const HYPRE_Real *V,
                                    HYPRE_Int nc, const HYPRE_Int *ind,
                                    HYPRE_Real *val)
{
   HYPRE_Real *a, *b;
   HYPRE_Real *ap;
   HYPRE_Int i, j;
   HYPRE_Real *work;
   HYPRE_Int work_size;
   HYPRE_Int info;
   HYPRE_Int temp;

   /*
      hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc);
      for (i=0; i<nc; i++) hypre_printf("%d ", ind[i]);
      hypre_printf("\n");
   */

   if (nc == 0)
   {
      return 0;
   }

   /* fixed workspace for dgels; presumably sized generously enough for the
      largest expected num/nc — TODO confirm against dgels' LWORK formula */
   work_size = 2000 * 64;
   work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST);

   /* a is filled column-major (LAPACK convention): column j holds the
      num smooth-vector values at coarse point ind[j] */
   a = hypre_CTAlloc(HYPRE_Real, num * nc, HYPRE_MEMORY_HOST);
   ap = a;

   for (j = 0; j < nc; j++)
   {
      for (i = 0; i < num; i++)
      {
         *ap = V[i * n + ind[j]];
         ap++;
      }
   }

   /* rhs must be large enough for both input (num) and solution (nc) */
   temp = MAX(nc, num);
   b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST);
   for (i = 0; i < num; i++)
   {
      b[i] = V[i * n + ip];
   }

   {
      char trans = 'N';
      HYPRE_Int one = 1;
      hypre_dgels(&trans, &num, &nc, &one, a, &num,
                  b, &temp, work, &work_size, &info);
      if (info != 0)
      {
         /* NOTE(review): format string has %d but no argument is passed —
            info is never printed; verify against hypre_error_w_msg usage */
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "par_gsmg: dgels returned %d\n");
      }

      /* copy solution into output vector */
      for (j = 0; j < nc; j++)
      {
         val[j] = b[j];
      }
   }

   hypre_TFree(b, HYPRE_MEMORY_HOST);
   hypre_TFree(a, HYPRE_MEMORY_HOST);
   hypre_TFree(work, HYPRE_MEMORY_HOST);

   return info;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpLS
 *
 * Interpolation built from fitting smooth vectors
 * - sequential version only
 *
 * Two passes over the fine grid (each split across num_threads ranges):
 * pass 1 sizes P and builds fine_to_coarse; pass 2 fills P, using
 * hypre_BoomerAMGFitVectors for the F-point weights.  The off-diagonal
 * part of P is left empty ("undone") — sequential version only.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A,
                              HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S,
                              HYPRE_BigInt *num_cpts_global,
                              HYPRE_Int num_functions,
                              HYPRE_Int *dof_func,
                              HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor,
                              HYPRE_Int num_smooth,
                              HYPRE_Real *SmoothVecs,
                              hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   /* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   /* HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
      HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
      HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); */
   HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
   /* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;

   HYPRE_Int *CF_marker_offd;
   HYPRE_Int *dof_func_offd = NULL;

   hypre_CSRMatrix *S_ext;
   //HYPRE_Real *S_ext_data;
   //HYPRE_Int *S_ext_i;
   //HYPRE_BigInt *S_ext_j;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;

   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;

   HYPRE_Int *P_marker;
   /* HYPRE_Int *P_marker_offd; */

   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   /* HYPRE_Int jj_begin_row,jj_begin_row_offd;
      HYPRE_Int jj_end_row,jj_end_row_offd; */

   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);

   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;

   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;

   HYPRE_Real one = 1.0;

   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;
   //HYPRE_BigInt *big_buf_data;

   HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   num_threads = hypre_NumThreads();

   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_S_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   }

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(S);
      comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of S
    *---------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   if (num_procs > 1)
   {
      S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 1);
      //S_ext_i = hypre_CSRMatrixI(S_ext);
      //S_ext_j = hypre_CSRMatrixBigJ(S_ext);
      //S_ext_data = hypre_CSRMatrixData(S_ext);
   }

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* each thread counts its own contiguous slice [ns, ne) of rows */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;

      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               /* removed */
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   /* prefix-sum the per-thread counters to get global offsets */
   for (i = 0; i < num_threads - 1; i++)
   {
      coarse_counter[i + 1] += coarse_counter[i];
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   /*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, HYPRE_MEMORY_HOST);
     big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);*/

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* shift each thread slice's local coarse numbering to be global */
      coarse_shift = 0;
      if (j > 0) { coarse_shift = coarse_counter[j - 1]; }
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   /*index = 0;
     for (i = 0; i < num_sends; i++)
     {
        start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
        for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
           big_buf_data[index++] = my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
     }

     comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd);
     hypre_ParCSRCommHandleDestroy(comm_handle);

     if (debug_flag==4)
     {
        wall_time = time_getWallclockSeconds() - wall_time;
        hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
        fflush(NULL);
     }

     if (debug_flag==4) wall_time = time_getWallclockSeconds();*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      /* resume this thread's slots from the first-pass prefix sums */
      jj_counter = 0;
      if (jl > 0) { jj_counter = jj_count[jl - 1]; }
      jj_counter_offd = 0;
      if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/

         else
         {
            HYPRE_Int kk;
            /* NOTE(review): fixed-size buffer; rows with more than 1000
               strong C-neighbors would overflow it — confirm the bound */
            HYPRE_Int indices[1000]; /* kludge */

            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            kk = 0;
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  jj_counter++;
                  indices[kk] = i1;
                  kk++;
               }
            }

            /* least-squares fit of the smooth vectors gives the weights */
            hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
                                      kk, indices, &P_diag_data[P_diag_i[i]]);

            /* Off-Diagonal part of P */
            /* undone */
         }
      }
   }
   /* NOTE(review): i is stale here (and was private in the OpenMP loop
      above); P_diag_i[n_fine] was already set earlier — verify intent */
   P_diag_i[i] = jj_counter; /* check that this is in right place for threads */

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(S),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(S),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
      /* truncation reallocates; re-fetch all CSR arrays */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      /* compress the offd column index space: sort, dedupe, renumber */
      P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < P_offd_size; i++)
      {
         P_marker[i] = P_offd_j[i];
      }

      hypre_qsort0(P_marker, 0, P_offd_size - 1);

      num_cols_P_offd = 1;
      index = P_marker[0];
      for (i = 1; i < P_offd_size; i++)
      {
         if (P_marker[i] > index)
         {
            index = P_marker[i];
            P_marker[num_cols_P_offd++] = index;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      for (i = 0; i < num_cols_P_offd; i++)
      {
         tmp_map_offd[i] = P_marker[i];
      }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P, S, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) { hypre_CSRMatrixDestroy(S_ext); }

   return (0);
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpGSMG
 *
 * Difference with hypre_BoomerAMGBuildInterp is that S contains values
 * and is used to build interpolation weights. Matrix A is not used.
 *--------------------------------------------------------------------------*/

/* Builds the interpolation operator P from the strength matrix S, which here
 * carries actual values (unlike the classical path that reads matrix A).
 * NOTE(review): the parameter A is accepted for interface parity with
 * hypre_BoomerAMGBuildInterp but is never referenced in this routine.
 *
 * CF_marker[i] >= 0 marks i as a C-point (interpolation row = identity);
 * otherwise i is an F-point and its row is built from strong C-neighbors,
 * with strong F-neighbor connections distributed among them.
 * On output *P_ptr holds the new hypre_ParCSRMatrix; returns 0. */
HYPRE_Int
hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A,
                                HYPRE_Int *CF_marker,
                                hypre_ParCSRMatrix *S,
                                HYPRE_BigInt *num_cpts_global,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                HYPRE_Int debug_flag,
                                HYPRE_Real trunc_factor,
                                hypre_ParCSRMatrix **P_ptr )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;

   /* on-process (diag) and off-process (offd) parts of S */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
   HYPRE_Int *tmp_map_offd = NULL;

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *CF_marker_offd;
   HYPRE_Int *dof_func_offd = NULL;

   /* ghost rows of S (rows owned by other processes), fetched below */
   hypre_CSRMatrix *S_ext;
   HYPRE_Real *S_ext_data;
   HYPRE_Int *S_ext_i;
   HYPRE_BigInt *S_ext_j;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;

   /* per-thread work arrays / row-position markers */
   HYPRE_Int *P_marker, *P_marker_offd;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   HYPRE_Int jj_begin_row, jj_begin_row_offd;
   HYPRE_Int jj_end_row, jj_end_row_offd;

   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);

   HYPRE_Int strong_f_marker;

   HYPRE_Int *fine_to_coarse;
   HYPRE_Int *coarse_counter;
   //HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_BigInt big_i2;

   HYPRE_Int i, i1, i2;
   HYPRE_Int j, jl, jj, jj1;
   HYPRE_Int start;
   HYPRE_Int c_num;

   HYPRE_Real sum;
   HYPRE_Real distribute;

   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;

   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;

   /* global column range [col_1, col_n) owned by this process */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;

   HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   num_threads = hypre_NumThreads();
   //my_first_cpt = num_cpts_global[0];
   total_global_cpts = 0; /* we will set this later for the matrix in the setup */
   /* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1];
      hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_S_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   }

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(S);
      comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   /* pack local CF_marker values for the send map, then exchange */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* same exchange for dof_func when running with multiple functions */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of S
    *---------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   if (num_procs > 1)
   {
      S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 1);
      S_ext_i = hypre_CSRMatrixI(S_ext);
      S_ext_j = hypre_CSRMatrixBigJ(S_ext);
      S_ext_data = hypre_CSRMatrixData(S_ext);
   }

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
   /* Each thread j counts nonzeros (jj_count / jj_count_offd) and C-points
      (coarse_counter) over its contiguous slice [ns, ne) of rows. */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0) { jj_count[j]++; }
            }
            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
               {
                  i1 = S_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   /* prefix-sum the per-thread counts so each thread knows its offsets */
   for (i = 0; i < num_threads - 1; i++)
   {
      coarse_counter[i + 1] += coarse_counter[i];
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/

   /* Second pass: each thread fills its slice of P, starting at the offsets
      computed in the first pass (jj_count / jj_count_offd prefix sums). */
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      jj_counter = 0;
      if (jl > 0) { jj_counter = jj_count[jl - 1]; }
      jj_counter_offd = 0;
      if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);

      for (i = 0; i < n_fine; i++) { P_marker[i] = -1; }
      for (i = 0; i < num_cols_S_offd; i++) { P_marker_offd[i] = -1; }
      /* strong_f_marker is decremented after every row so stale marks from
         earlier rows can never be mistaken for the current row's marks */
      strong_f_marker = -2;

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;

            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
               /*--------------------------------------------------------------
                * If neighbor i1 is an F-point, mark it as a strong F-point
                * whose connection needs to be distributed.
                *--------------------------------------------------------------*/
               else
               {
                  P_marker[i1] = strong_f_marker;
               }
            }
            jj_end_row = jj_counter;

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;

            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
               {
                  i1 = S_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
                  /*-----------------------------------------------------------
                   * If neighbor i1 is an F-point, mark it as a strong F-point
                   * whose connection needs to be distributed.
                   *-----------------------------------------------------------*/
                  else
                  {
                     P_marker_offd[i1] = strong_f_marker;
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;

            /* Loop over ith row of S. First, the diagonal part of S */
            for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += S_diag_data[jj];
               }
               /*--------------------------------------------------------------
                * Case 2: neighbor i1 is an F-point and strongly influences i,
                * distribute a_{i,i1} to C-points that strongly infuence i.
                * Note: currently no distribution to the diagonal in this case.
                *--------------------------------------------------------------*/
               else if (P_marker[i1] == strong_f_marker)
               {
                  sum = zero;

                  /*-----------------------------------------------------------
                   * Loop over row of S for point i1 and calculate the sum
                   * of the connections to c-points that strongly influence i.
                   *-----------------------------------------------------------*/

                  /* Diagonal block part of row i1 */
                  for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++)
                  {
                     i2 = S_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row) { sum += S_diag_data[jj1]; }
                  }

                  /* Off-Diagonal block part of row i1 */
                  if (num_procs > 1)
                  {
                     for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++)
                     {
                        i2 = S_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd) { sum += S_offd_data[jj1]; }
                     }
                  }

                  if (sum != 0)
                  {
                     distribute = S_diag_data[jj] / sum;

                     /*-----------------------------------------------------------
                      * Loop over row of S for point i1 and do the distribution.
                      *-----------------------------------------------------------*/

                     /* Diagonal block part of row i1 */
                     for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++)
                     {
                        i2 = S_diag_j[jj1];
                        if (P_marker[i2] >= jj_begin_row)
                           P_diag_data[P_marker[i2]] += distribute * S_diag_data[jj1];
                     }

                     /* Off-Diagonal block part of row i1 */
                     if (num_procs > 1)
                     {
                        for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++)
                        {
                           i2 = S_offd_j[jj1];
                           if (P_marker_offd[i2] >= jj_begin_row_offd)
                              P_offd_data[P_marker_offd[i2]] += distribute * S_offd_data[jj1];
                        }
                     }
                  }
                  else
                  {
                     /* do nothing */
                  }
               }
               /*--------------------------------------------------------------
                * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                * into the diagonal.
                *--------------------------------------------------------------*/
               else
               {
                  /* do nothing */
               }
            }

            /*----------------------------------------------------------------
             * Still looping over ith row of S. Next, loop over the
             * off-diagonal part of S
             *---------------------------------------------------------------*/

            if (num_procs > 1)
            {
               for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++)
               {
                  i1 = S_offd_j[jj];

                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += S_offd_data[jj];
                  }
                  /*------------------------------------------------------------
                   * Case 2: neighbor i1 is an F-point and strongly influences i,
                   * distribute a_{i,i1} to C-points that strongly infuence i.
                   * Note: currently no distribution to the diagonal in this case.
                   *-----------------------------------------------------------*/
                  else if (P_marker_offd[i1] == strong_f_marker)
                  {
                     sum = zero;

                     /*---------------------------------------------------------
                      * Loop over row of S_ext for point i1 and calculate the sum
                      * of the connections to c-points that strongly influence i.
                      *---------------------------------------------------------*/

                     /* find row number */
                     c_num = S_offd_j[jj];

                     for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++)
                     {
                        big_i2 = S_ext_j[jj1];
                        if (big_i2 >= col_1 && big_i2 < col_n)
                        {
                           /* in the diagonal block */
                           if (P_marker[(HYPRE_Int)(big_i2 - col_1)] >= jj_begin_row) { sum += S_ext_data[jj1]; }
                        }
                        else
                        {
                           /* in the off_diagonal block */
                           j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd);
                           if (j != -1)
                           {
                              if (P_marker_offd[j] >= jj_begin_row_offd) { sum += S_ext_data[jj1]; }
                           }
                        }
                     }

                     if (sum != 0)
                     {
                        distribute = S_offd_data[jj] / sum;

                        /*---------------------------------------------------------
                         * Loop over row of S_ext for point i1 and do
                         * the distribution.
                         *--------------------------------------------------------*/

                        /* Diagonal block part of row i1 */
                        for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++)
                        {
                           big_i2 = S_ext_j[jj1];
                           if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */
                           {
                              if (P_marker[(HYPRE_Int)(big_i2 - col_1)] >= jj_begin_row)
                                 P_diag_data[P_marker[(HYPRE_Int)(big_i2 - col_1)]] += distribute * S_ext_data[jj1];
                           }
                           else
                           {
                              /* check to see if it is in the off_diagonal block */
                              j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd);
                              if (j != -1)
                              {
                                 if (P_marker_offd[j] >= jj_begin_row_offd)
                                    P_offd_data[P_marker_offd[j]] += distribute * S_ext_data[jj1];
                              }
                           }
                        }
                     }
                     else
                     {
                        /* do nothing */
                     }
                  }
                  /*-----------------------------------------------------------
                   * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
                   * into the diagonal.
                   *-----------------------------------------------------------*/
                  else
                  {
                     /* do nothing */
                  }
               }
            }

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/
            /* NOTE(review): rows are normalized so each row of P sums to 1;
               a zero row sum here would divide by zero — presumably ruled out
               by how S is constructed upstream (verify against caller). */

            sum = 0.;
            for (jj = jj_begin_row; jj < jj_end_row; jj++) { sum += P_diag_data[jj]; }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { sum += P_offd_data[jj]; }
            for (jj = jj_begin_row; jj < jj_end_row; jj++) { P_diag_data[jj] /= sum; }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { P_offd_data[jj] /= sum; }
         }

         strong_f_marker--;

         P_offd_i[i + 1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(S),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(S),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* Build the compressed off-process column map for P: sort the column
      indices used in P_offd_j, deduplicate, then remap P_offd_j to the
      compressed numbering. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < P_offd_size; i++) { P_marker[i] = P_offd_j[i]; }

      hypre_qsort0(P_marker, 0, P_offd_size - 1);

      num_cols_P_offd = 1;
      index = P_marker[0];
      for (i = 1; i < P_offd_size; i++)
      {
         if (P_marker[i] > index)
         {
            index = P_marker[i];
            P_marker[num_cols_P_offd++] = index;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      for (i = 0; i < num_cols_P_offd; i++) { tmp_map_offd[i] = P_marker[i]; }

#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P, S, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) { hypre_CSRMatrixDestroy(S_ext); }

   return (0);
}
GB_unaryop__lnot_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int16_uint64
// op(A') function: GB_tran__lnot_int16_uint64

// C type: int16_t
// A type: uint64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = !(aij != 0)

// A's entry type
#define GB_ATYPE \
    uint64_t

// C's entry type
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
// Note: GB_CASTING declares the local x that GB_OP then consumes.
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = !((int16_t) Ax [p] != 0) for p in [0, anz), in parallel
// over nthreads OpenMP threads.  Returns GrB_NO_VALUE when the operator is
// compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_int16_uint64
(
    int16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the GB_unaryop_transpose.c template, included below
// with GB_PHASE_2_OF_2 defined; Rowcounts, Iter, A_slice, and naslice are
// presumably consumed by that template (not visible here).
GrB_Info GB_tran__lnot_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nested_thread_num.c
// RUN: %libomp-compile-and-run | FileCheck %s // RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN #include "callback.h" #include <omp.h> #include <unistd.h> int main() { int condition = 0; omp_set_nested(1); print_frame(0); #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_frame(0); // get all implicit task events before starting nested: #pragma omp barrier #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_ids(2); print_frame(0); OMPT_SIGNAL(condition); OMPT_WAIT(condition, 4); #pragma omp barrier print_fuzzy_address(1); print_ids(0); } print_fuzzy_address(2); print_ids(0); } print_fuzzy_address(3); return 0; } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: parallel_data initially not null // CHECK-NOT: 0: task_data initially not null // CHECK-NOT: 0: thread_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: // CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], // CHECK-SAME: parent_task_frame.exit=[[NULL]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], // CHECK-SAME: requested_team_size=2, // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: invoker=[[PARALLEL_INVOKER:[0-9]+]] // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: // Note that we cannot ensure that the worker threads have already called // barrier_end and implicit_task_end before parallel_end! 
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // CHECK: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // THREADS: {{^}}0: NULL_POINTER=[[NULL:.*$]] // THREADS: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: // THREADS-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], // THREADS-SAME: parent_task_frame.exit=[[NULL]], // THREADS-SAME: parent_task_frame.reenter=0x{{[0-f]+}}, // THREADS-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // THREADS-SAME: invoker=[[PARALLEL_INVOKER:[0-9]+]] // nested parallel masters // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]], // THREADS-SAME: team_size=2, thread_num=0 // THREADS: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // THREADS-SAME: reenter_frame=[[NULL]], // THREADS-SAME: thread_num=0 // THREADS: {{^}}[[MASTER_ID]]: task level 1: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], // THREADS-SAME: reenter_frame=0x{{[0-f]+}} // THREADS: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: // THREADS-SAME: parent_task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: parent_task_frame.exit=[[EXIT]], // THREADS-SAME: parent_task_frame.reenter=0x{{[0-f]+}}, // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], // THREADS-SAME: requested_team_size=2, // 
THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // THREADS-SAME: invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]], team_size=2, // THREADS-SAME: thread_num=0 // THREADS: __builtin_frame_address({{.}})=[[NESTED_EXIT:0x[0-f]+]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: exit_frame=[[NESTED_EXIT]], reenter_frame=[[NULL]], // THREADS-SAME: thread_num=0 // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // THREADS-SAME: reenter_frame=0x{{[0-f]+}} // THREADS: {{^}}[[MASTER_ID]]: task level 2: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], // THREADS-SAME: reenter_frame=0x{{[0-f]+}} // THREADS: __builtin_frame_address(0)=[[NESTED_REENTER:0x[0-f]+]] // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // explicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[BARRIER_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: exit_frame=[[NESTED_EXIT]], reenter_frame=0x{{[0-f]+}} // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[BARRIER_RETURN_ADDRESS]] // THREADS: {{^}}[[MASTER_ID]]: task level 0: // THREADS-SAME: 
parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: exit_frame=[[NESTED_EXIT]], reenter_frame=[[NULL]] // implicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: invoker=[[PARALLEL_INVOKER]], // THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]] // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // THREADS-SAME: reenter_frame=[[NULL]] // implicit barrier // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], // THREADS-SAME: reenter_frame=[[NULL]] // THREADS: {{^}}[[MASTER_ID]]: 
ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], // THREADS-SAME: invoker=[[PARALLEL_INVOKER]], // THREADS-SAME: codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}} // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // Worker of first nesting level // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]], team_size=2, // THREADS-SAME: thread_num=[[OUTER_THREADNUM:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: thread_num=[[OUTER_THREADNUM]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: // THREADS-SAME: parent_task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: parent_task_frame.exit={{0x[0-f]+}}, // THREADS-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=2, // THREADS-SAME: codeptr_ra=[[NESTED_RETURN_ADDRESS]]{{[0-f][0-f]}}, // THREADS-SAME: invoker=[[PARALLEL_INVOKER]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]], team_size=2, // THREADS-SAME: thread_num=[[INNER_THREADNUM:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: 
task_id=[[NESTED_IMPLICIT_TASK_ID]], // THREADS-SAME: thread_num=[[INNER_THREADNUM]] // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], // THREADS-SAME: thread_num=[[OUTER_THREADNUM]] // THREADS: {{^}}[[THREAD_ID]]: task level 2: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[NESTED_IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]] // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // nested parallel worker threads // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS-SAME: thread_num=[[THREADNUM:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]] // THREADS-SAME: thread_num=[[THREADNUM]] // 
can't reliably tell which parallel region is the parent... // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, // THREADS-SAME: task_id={{[0-9]+}} // THREADS-SAME: thread_num={{[01]}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]] // THREADS-SAME: thread_num=0 // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // other nested parallel worker threads // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // THREADS-SAME: thread_num=[[THREADNUM:[0-9]+]] // THREADS: {{^}}[[THREAD_ID]]: task level 0: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]] // THREADS-SAME: thread_num=[[THREADNUM]] // can't reliably tell which parallel region is the parent... 
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id={{[0-9]+}}, // THREADS-SAME: task_id={{[0-9]+}} // THREADS-SAME: thread_num={{[01]}} // THREADS: {{^}}[[THREAD_ID]]: task level 2: // THREADS-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]], // THREADS-SAME: task_id=[[PARENT_TASK_ID]] // THREADS-SAME: thread_num=0 // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: // THREADS-SAME: parallel_id=[[NESTED_PARALLEL_ID]], // THREADS-SAME: task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: // THREADS-SAME: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
NeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_

#include "../VectorIndex.h"

#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"

namespace SPTAG
{
    namespace COMMON
    {
        // Abstract base class for the neighborhood (RNG) graph of a VectorIndex.
        // Storage is one row of m_iNeighborhoodSize neighbor ids per vector
        // (m_pNeighborhoodGraph).  Construction seeds candidate neighbors with
        // random-projection TP-tree partitions and then refines each node's
        // neighbor list by querying the index.  Derived classes implement the
        // neighbor insertion / rebuild policy and the accuracy estimator.
        class NeighborhoodGraph
        {
        public:
            NeighborhoodGraph(): m_iTPTNumber(32), m_iTPTLeafSize(2000), m_iSamples(1000), m_numTopDimensionTPTSplit(5),
                m_iNeighborhoodSize(32), m_iNeighborhoodScale(2), m_iCEFScale(2), m_iRefineIter(2), m_iCEF(1000), m_iAddCEF(500), m_iMaxCheckForRefineGraph(10000) {}

            ~NeighborhoodGraph() {}

            // Insert (insertNode, insertDist) into node's neighbor list (policy defined by subclass).
            virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0;

            // Rebuild node's neighbor row from a list of query results (policy defined by subclass).
            virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0;

            // Estimate graph accuracy by sampling `samples` nodes (subclass-defined metric).
            virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) = 0;

            // Build the graph from scratch: enlarge the neighborhood by
            // m_iNeighborhoodScale, seed candidates from m_iTPTNumber TP-trees
            // (all-pairs distances within each leaf), then refine.  idmap, when
            // given, remaps point ids before they are stored as neighbors.
            template <typename T>
            void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                std::cout << "build RNG graph!" << std::endl;

                m_iGraphSize = index->GetNumSamples();
                m_iNeighborhoodSize = m_iNeighborhoodSize * m_iNeighborhoodScale;
                m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize);

                // tiny datasets: refinement alone (brute-force search) suffices
                if (m_iGraphSize < 1000) {
                    RefineGraph<T>(index, idmap);
                    std::cout << "Build RNG Graph end!" << std::endl;
                    return;
                }

                {
                    // scratch distances matching the candidate-neighbor matrix
                    COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize);
                    std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize));
                    std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>());
                    for (SizeType i = 0; i < m_iGraphSize; i++)
                        for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                            (NeighborhoodDists)[i][j] = MaxDist;

                    std::cout << "Parallel TpTree Partition begin " << std::endl;
                    // each thread builds one TP-tree over a fresh shuffle;
                    // Sleep + clock() stagger the per-thread RNG seeds
#pragma omp parallel for schedule(dynamic)
                    for (int i = 0; i < m_iTPTNumber; i++)
                    {
                        Sleep(i * 100); std::srand(clock());
                        for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
                        std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end());
                        PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
                        std::cout << "Finish Getting Leaves for Tree " << i << std::endl;
                    }
                    std::cout << "Parallel TpTree Partition done" << std::endl;

                    // all-pairs distances inside every leaf feed the candidate lists
                    for (int i = 0; i < m_iTPTNumber; i++)
                    {
#pragma omp parallel for schedule(dynamic)
                        for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++)
                        {
                            SizeType start_index = TptreeLeafNodes[i][j].first;
                            SizeType end_index = TptreeLeafNodes[i][j].second;
                            if (omp_get_thread_num() == 0) std::cout << "\rProcessing Tree " << i << ' ' << j * 100 / TptreeLeafNodes[i].size() << '%';
                            for (SizeType x = start_index; x < end_index; x++)
                            {
                                for (SizeType y = x + 1; y <= end_index; y++)
                                {
                                    SizeType p1 = TptreeDataIndices[i][x];
                                    SizeType p2 = TptreeDataIndices[i][y];
                                    float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
                                    if (idmap != nullptr) {
                                        p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
                                        p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
                                    }
                                    COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
                                    COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
                                }
                            }
                        }
                        TptreeDataIndices[i].clear();
                        TptreeLeafNodes[i].clear();
                        std::cout << std::endl;
                    }
                    TptreeDataIndices.clear();
                    TptreeLeafNodes.clear();
                }

                RefineGraph<T>(index, idmap);
            }

            // In-place refinement: m_iRefineIter passes of RefineNode over every
            // node; the scaled-up neighborhood (and CEF) is used for all but the
            // last pass, then the neighborhood is shrunk back.  Negative idmap
            // keys encode tail-pointer updates into the final neighbor slot.
            template <typename T>
            void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                for (int iter = 0; iter < m_iRefineIter - 1; iter++)
                {
#pragma omp parallel for schedule(dynamic)
                    for (SizeType i = 0; i < m_iGraphSize; i++)
                    {
                        RefineNode<T>(index, i, false, false, m_iCEF * m_iCEFScale);
                        if (i % 1000 == 0) std::cout << "\rRefine " << iter << " " << static_cast<int>(i * 1.0 / m_iGraphSize * 100) << "%";
                    }
                    std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl;
                }

                m_iNeighborhoodSize /= m_iNeighborhoodScale;

#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < m_iGraphSize; i++)
                {
                    RefineNode<T>(index, i, false, false, m_iCEF);
                    if (i % 1000 == 0) std::cout << "\rRefine " << (m_iRefineIter - 1) << " " << static_cast<int>(i * 1.0 / m_iGraphSize * 100) << "%";
                }
                std::cout << "Refine RNG, graph acc:" << GraphAccuracyEstimation(index, 100, idmap) << std::endl;

                if (idmap != nullptr) {
                    for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
                        if (iter->first < 0)
                        {
                            // negative key -1-x marks row x; store -2-value in the last slot
                            m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
                        }
                }
            }

            // Refinement into a new graph restricted to `indices`; neighbor ids
            // are remapped through reverseIndices (and idmap, if given).  The
            // result is written into newGraph (allocated here when null) and
            // optionally serialized to `output`.
            template <typename T>
            ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices,
                std::ostream* output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
            {
                std::shared_ptr<NeighborhoodGraph> tmp;
                if (newGraph == nullptr)
                {
                    tmp = NeighborhoodGraph::CreateInstance(Type());
                    newGraph = tmp.get();
                }
                SizeType R = (SizeType)indices.size();
                newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize);
                newGraph->m_iGraphSize = R;
                newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize;

#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < R; i++)
                {
                    if (i % 1000 == 0) std::cout << "\rRefine " << static_cast<int>(i * 1.0 / R * 100) << "%";

                    SizeType *outnodes = newGraph->m_pNeighborhoodGraph[i];

                    COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1);
                    index->RefineSearchIndex(query, false);
                    RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1);

                    std::unordered_map<SizeType, SizeType>::const_iterator iter;
                    for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                    {
                        if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]];
                        if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second;
                    }
                    if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end())
                        outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second;
                }

                if (output != nullptr) newGraph->SaveGraph(*output);
                return ErrorCode::Success;
            }

            // Re-query the index for `node` and rebuild its neighbor row; when
            // updateNeighbors is set, also push `node` into each result's list.
            template <typename T>
            void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF)
            {
                COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1);
                index->RefineSearchIndex(query, searchDeleted);
                RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1);
                if (updateNeighbors) {
                    // update neighbors
                    for (int j = 0; j <= CEF; j++)
                    {
                        BasicResult* item = query.GetResult(j);
                        if (item->VID < 0) break;
                        if (item->VID == node) continue;

                        InsertNeighbors(index, item->VID, node, item->Dist);
                    }
                }
            }

            // Recursively split indices[first..last] by a random projection on
            // the m_numTopDimensionTPTSplit highest-variance dimensions
            // (estimated on at most m_iSamples points); ranges no larger than
            // m_iTPTLeafSize are emitted as leaves.
            template <typename T>
            void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last,
                std::vector<std::pair<SizeType, SizeType>> & leaves)
            {
                if (last - first <= m_iTPTLeafSize)
                {
                    leaves.emplace_back(first, last);
                }
                else
                {
                    std::vector<float> Mean(index->GetFeatureDim(), 0);

                    int iIteration = 100;
                    SizeType end = min(first + m_iSamples, last);
                    SizeType count = end - first + 1;
                    // calculate the mean of each dimension
                    for (SizeType j = first; j <= end; j++)
                    {
                        const T* v = (const T*)index->GetSample(indices[j]);
                        for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                        {
                            Mean[k] += v[k];
                        }
                    }
                    for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                    {
                        Mean[k] /= count;
                    }
                    std::vector<BasicResult> Variance;
                    Variance.reserve(index->GetFeatureDim());
                    for (DimensionType j = 0; j < index->GetFeatureDim(); j++)
                    {
                        Variance.emplace_back(j, 0.0f);
                    }
                    // calculate the variance of each dimension
                    for (SizeType j = first; j <= end; j++)
                    {
                        const T* v = (const T*)index->GetSample(indices[j]);
                        for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                        {
                            float dist = v[k] - Mean[k];
                            Variance[k].Dist += dist*dist;
                        }
                    }
                    std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
                    std::vector<SizeType> indexs(m_numTopDimensionTPTSplit);
                    std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
                    float bestvariance = Variance[index->GetFeatureDim() - 1].Dist;
                    for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
                    {
                        indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID;
                        bestweight[i] = 0;
                    }
                    bestweight[0] = 1;
                    float bestmean = Mean[indexs[0]];

                    // try iIteration random weight vectors, keep the one whose
                    // projection has the largest variance over the sample
                    std::vector<float> Val(count);
                    for (int i = 0; i < iIteration; i++)
                    {
                        float sumweight = 0;
                        for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                        {
                            weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
                            sumweight += weight[j] * weight[j];
                        }
                        sumweight = sqrt(sumweight);
                        for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                        {
                            weight[j] /= sumweight;
                        }
                        float mean = 0;
                        for (SizeType j = 0; j < count; j++)
                        {
                            Val[j] = 0;
                            const T* v = (const T*)index->GetSample(indices[first + j]);
                            for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                            {
                                Val[j] += weight[k] * v[indexs[k]];
                            }
                            mean += Val[j];
                        }
                        mean /= count;
                        float var = 0;
                        for (SizeType j = 0; j < count; j++)
                        {
                            float dist = Val[j] - mean;
                            var += dist * dist;
                        }
                        if (var > bestvariance)
                        {
                            bestvariance = var;
                            bestmean = mean;
                            for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                            {
                                bestweight[j] = weight[j];
                            }
                        }
                    }
                    SizeType i = first;
                    SizeType j = last;
                    // decide which child one point belongs
                    while (i <= j)
                    {
                        float val = 0;
                        const T* v = (const T*)index->GetSample(indices[i]);
                        for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                        {
                            val += bestweight[k] * v[indexs[k]];
                        }
                        if (val < bestmean)
                        {
                            i++;
                        }
                        else
                        {
                            std::swap(indices[i], indices[j]);
                            j--;
                        }
                    }
                    // if all the points in the node are equal,equally split the node into 2
                    if ((i == first) || (i == last + 1))
                    {
                        i = (first + last + 1) / 2;
                    }

                    Mean.clear();
                    Variance.clear();
                    Val.clear();
                    indexs.clear();
                    weight.clear();
                    bestweight.clear();

                    PartitionByTptree<T>(index, indices, first, i - 1, leaves);
                    PartitionByTptree<T>(index, indices, i, last, leaves);
                }
            }

            // Bytes needed to serialize the graph storage.
            inline std::uint64_t BufferSize() const
            {
                return m_pNeighborhoodGraph.BufferSize();
            }

            // Load from file; refreshes cached row/column counts.
            bool LoadGraph(std::string sGraphFilename)
            {
                if (!m_pNeighborhoodGraph.Load(sGraphFilename)) return false;

                m_iGraphSize = m_pNeighborhoodGraph.R();
                m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
                return true;
            }

            // Load from an in-memory image; refreshes cached row/column counts.
            bool LoadGraph(char* pGraphMemFile)
            {
                m_pNeighborhoodGraph.Load(pGraphMemFile);

                m_iGraphSize = m_pNeighborhoodGraph.R();
                m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
                return true;
            }

            // Save to a binary file (delegates to the stream overload).
            bool SaveGraph(std::string sGraphFilename) const
            {
                std::cout << "Save " << m_pNeighborhoodGraph.Name() << " To " << sGraphFilename << std::endl;
                std::ofstream output(sGraphFilename, std::ios::binary);
                if (!output.is_open()) return false;
                SaveGraph(output);
                output.close();
                return true;
            }

            // Serialize: row count, column count, then each neighbor row.
            bool SaveGraph(std::ostream& output) const
            {
                output.write((char*)&m_iGraphSize, sizeof(SizeType));
                output.write((char*)&m_iNeighborhoodSize, sizeof(DimensionType));
                for (SizeType i = 0; i < m_iGraphSize; i++)
                    output.write((char*)m_pNeighborhoodGraph[i], sizeof(SizeType) * m_iNeighborhoodSize);
                std::cout << "Save " << m_pNeighborhoodGraph.Name() << " (" << m_iGraphSize << ", " << m_iNeighborhoodSize << ") Finish!" << std::endl;
                return true;
            }

            // Grow the graph by `num` empty rows.
            inline ErrorCode AddBatch(SizeType num)
            {
                ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num);
                if (ret != ErrorCode::Success) return ret;

                m_iGraphSize += num;
                return ErrorCode::Success;
            }

            inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; }

            inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; }

            // Thread-safe single-cell update guarded by the per-row lock.
            void Update(SizeType row, DimensionType col, SizeType val)
            {
                std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]);
                m_pNeighborhoodGraph[row][col] = val;
            }

            inline void SetR(SizeType rows)
            {
                m_pNeighborhoodGraph.SetR(rows);
                m_iGraphSize = rows;
            }

            inline SizeType R() const { return m_iGraphSize; }

            inline std::string Type() const { return m_pNeighborhoodGraph.Name(); }

            // Factory for concrete graph implementations (defined elsewhere).
            static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);

        protected:
            // Graph structure
            SizeType m_iGraphSize;                           // number of rows (vectors)
            COMMON::Dataset<SizeType> m_pNeighborhoodGraph;  // neighbor ids, one row per vector
            FineGrainedLock m_dataUpdateLock;                // per-row locks for Update()

        public:
            // build/refine tuning parameters (see constructor for defaults)
            int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
            DimensionType m_iNeighborhoodSize;
            int m_iNeighborhoodScale, m_iCEFScale, m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph;
        };
    }
}
#endif
hcb_basis_core.h
#ifndef _HCB_BASIS_CORE_H
#define _HCB_BASIS_CORE_H

#include <complex>
#include <vector>
#include <iostream>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "benes_perm.h"
#include "openmp.h"

namespace basis_general {

// Apply a site permutation `map` to the bit string s (one bit per site,
// N sites).  A negative map entry -k means "send the bit to position k-1
// AND flip it" (the (s&1)^1 term); reference implementation kept for the
// Benes-network version used below.
template<class I>
I inline hcb_map_bits(I s,const int map[],const int N){
	I ss = 0;

	for(int i=N-1;i>=0;--i){
		int j = map[i];
		ss ^= (j<0 ? ((s&1)^1)<<(N+j) : (s&1)<<(N-j-1) );
		s >>= 1;
	}
	return ss;
}

// Basis core for hard-core bosons (one bit per site).  Symmetry
// transformations are compiled into Benes networks (benes_maps) plus a
// bit-flip mask per transformation (invs), so applying a map is a
// permutation-network pass on s^inv instead of a per-bit loop.
template<class I,class P=signed char>
class hcb_basis_core : public general_basis_core<I,P>
{
	public:
		std::vector<tr_benes<I>> benes_maps;  // one Benes network per transformation
		std::vector<I> invs;                  // per-transformation bit-flip masks

		// No-symmetry constructor.
		hcb_basis_core(const int _N, const bool _fermionic=false,const bool _pre_check=false) : \
		general_basis_core<I>::general_basis_core(_N,_fermionic,_pre_check) {}

		// Constructor with _nt symmetry transformations: precompile each map
		// row into a Benes network and record which bits it flips.
		hcb_basis_core(const int _N,const int _nt,const int _maps[], \
		const int _pers[], const int _qs[], const bool _fermionic=false,const bool _pre_check=false) : \
		general_basis_core<I>::general_basis_core(_N,_nt,_maps,_pers,_qs,_fermionic,_pre_check) {
			benes_maps.resize(_nt);
			invs.resize(_nt);
			ta_index<I> index;
			for(int j=0;j<bit_info<I>::bits;j++){index.data[j] = no_index;}

			for(int i=0;i<_nt;i++){
				const int * map = &general_basis_core<I,P>::maps[i*_N];
				I inv = 0;
				// translate the site map into a bit-index map; negative
				// entries additionally set the corresponding flip bit in inv
				for(int j=0;j<_N;j++){
					int m = map[j];
					int bit_j = _N - j - 1;
					if(m<0){
						int bit_m = _N + m;
						index.data[bit_j] = bit_m;
						inv ^= ((I)1 << bit_j);
					}
					else{
						int bit_m = _N - m -1;
						index.data[bit_j] = bit_m;
					}
				}
				gen_benes<I>(&benes_maps[i],index);
				invs[i] = inv;
			}
		}

		~hcb_basis_core() {}

		// Top N_p bits of s, used as a bucket/prefix key.
		npy_intp get_prefix(const I s,const int N_p){
			return integer_cast<npy_intp,I>(s >> (general_basis_core<I,P>::N - N_p));
		}

		// Apply transformation n_map to a single state (sign is untouched:
		// hard-core bosons carry no fermionic sign here).
		I map_state(I s,int n_map,P &sign){
			if(general_basis_core<I,P>::nt<=0){
				return s;
			}
			return benes_bwd(&benes_maps[n_map],s^invs[n_map]);
		}

		// Apply transformation n_map to M states in place; meant to run
		// inside an existing OpenMP parallel region (omp for, no parallel).
		void map_state(I s[],npy_intp M,int n_map,P sign[]){
			if(general_basis_core<I,P>::nt<=0){
				return;
			}
			const tr_benes<I> * benes_map = &benes_maps[n_map];
			const I inv = invs[n_map];

			#pragma omp for schedule(static)
			for(npy_intp i=0;i<M;i++){
				s[i] = benes_bwd(benes_map,s[i]^inv);
			}
		}

		// Total particle number = popcount of the occupation bit string.
		std::vector<int> count_particles(const I s){
			std::vector<int> v(1);
			v[0] = bit_count(s,general_basis_core<I,P>::N);
			return v;
		}

		// I map_state(I s,int n_map,int &sign){
		// 	if(general_basis_core<I,P>::nt<=0){
		// 		return s;
		// 	}
		// 	const int n = general_basis_core<I,P>::N;
		// 	return hcb_map_bits(s,&general_basis_core<I,P>::maps[n_map*n],n);
		// }

		// void map_state(I s[],npy_intp M,int n_map,signed char sign[]){
		// 	if(general_basis_core<I,P>::nt<=0){
		// 		return;
		// 	}
		// 	const int n = general_basis_core<I,P>::N;
		// 	const int * map = &general_basis_core<I,P>::maps[n_map*n];
		// 	#pragma omp for schedule(static,1)
		// 	for(npy_intp i=0;i<M;i++){
		// 		s[i] = hcb_map_bits(s[i],map,n);
		// 	}
		// }

		// Next bit string with the same popcount (same particle number) in
		// lexicographic order — the classic Gosper bit trick; nns is unused.
		I inline next_state_pcon(const I s,const I nns){
			if(s==0){return s;}
			I t = (s | (s - 1)) + 1;
			return t | ((((t & (0-t)) / (s & (0-s))) >> 1) - 1);
		}

		// Act with the operator string opstr (sites indx) on state r,
		// accumulating the matrix element in m.  Supported single-site
		// operators: z, n, x, y, +, -, I (spin-1/2 / hard-core boson
		// conventions as coded below).  Restores r and stops early when the
		// matrix element vanishes.  Returns 0 on success, -1 on an unknown
		// operator character.
		int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){
			const I s = r;
			const I one = 1;
			const int NN = general_basis_core<I,P>::N;

			for(int j=n_op-1;j>-1;j--){
				const int ind = NN-indx[j]-1;
				const I b = (one << ind);
				const bool a = (bool)((r >> ind)&one);
				const char op = opstr[j];
				switch(op){
					case 'z':
						m *= (a?0.5:-0.5);
						break;
					case 'n':
						m *= (a?1:0);
						break;
					case 'x':
						r ^= b;
						m *= 0.5;
						break;
					case 'y':
						m *= (a?std::complex<double>(0,0.5):std::complex<double>(0,-0.5));
						r ^= b;
						break;
					case '+':
						m *= (a?0:1);
						r ^= b;
						break;
					case '-':
						m *= (a?1:0);
						r ^= b;
						break;
					case 'I':
						break;
					default:
						return -1;
				}

				// zero matrix element: restore the input state and bail out
				if(m.real()==0 && m.imag()==0){
					r = s;
					break;
				}
			}

			return 0;
		}
};

}
#endif
nvector_openmp.c
/* * ----------------------------------------------------------------- * $Revision: 4869 $ * $Date: 2016-08-19 10:34:20 -0700 (Fri, 19 Aug 2016) $ * ----------------------------------------------------------------- * Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL * ----------------------------------------------------------------- * Acknowledgements: This NVECTOR module is based on the NVECTOR * Serial module by Scott D. Cohen, Alan C. * Hindmarsh, Radu Serban, and Aaron Collier * @ LLNL * ----------------------------------------------------------------- * LLNS Copyright Start * Copyright (c) 2014, Lawrence Livermore National Security * This work was performed under the auspices of the U.S. Department * of Energy by Lawrence Livermore National Laboratory in part under * Contract W-7405-Eng-48 and in part under Contract DE-AC52-07NA27344. * Produced at the Lawrence Livermore National Laboratory. * All rights reserved. * For details, see the LICENSE file. * LLNS Copyright End * ----------------------------------------------------------------- * This is the implementation file for an OpenMP implementation * of the NVECTOR module. 
* ----------------------------------------------------------------- */ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_math.h> #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) /* Private function prototypes */ /* z=x */ static void VCopy_OpenMP(N_Vector x, N_Vector z); /* z=x+y */ static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */ static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=-x */ static void VNeg_OpenMP(N_Vector x, N_Vector z); /* z=c(x+y) */ static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */ static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */ static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */ static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* y <- ax+y */ static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y); /* x <- ax */ static void VScaleBy_OpenMP(realtype a, N_Vector x); /* * ----------------------------------------------------------------- * exported functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. 
*/

N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v)
{
  return SUNDIALS_NVEC_OPENMP;
}

/* ----------------------------------------------------------------------------
 * Function to create a new empty vector
 */

/* Allocates the N_Vector shell and its ops table, fills in every OpenMP
 * vector operation, and attaches a content structure with a NULL data
 * pointer (own_data = FALSE).  Returns NULL on any allocation failure,
 * freeing whatever was already allocated. */
N_Vector N_VNewEmpty_OpenMP(long int length, int num_threads)
{
  N_Vector v;
  N_Vector_Ops ops;
  N_VectorContent_OpenMP content;

  /* Create vector */
  v = NULL;
  v = (N_Vector) malloc(sizeof *v);
  if (v == NULL) return(NULL);

  /* Create vector operation structure */
  ops = NULL;
  ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
  if (ops == NULL) { free(v); return(NULL); }

  ops->nvgetvectorid     = N_VGetVectorID_OpenMP;
  ops->nvclone           = N_VClone_OpenMP;
  ops->nvcloneempty      = N_VCloneEmpty_OpenMP;
  ops->nvdestroy         = N_VDestroy_OpenMP;
  ops->nvspace           = N_VSpace_OpenMP;
  ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
  ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
  ops->nvlinearsum       = N_VLinearSum_OpenMP;
  ops->nvconst           = N_VConst_OpenMP;
  ops->nvprod            = N_VProd_OpenMP;
  ops->nvdiv             = N_VDiv_OpenMP;
  ops->nvscale           = N_VScale_OpenMP;
  ops->nvabs             = N_VAbs_OpenMP;
  ops->nvinv             = N_VInv_OpenMP;
  ops->nvaddconst        = N_VAddConst_OpenMP;
  ops->nvdotprod         = N_VDotProd_OpenMP;
  ops->nvmaxnorm         = N_VMaxNorm_OpenMP;
  ops->nvwrmsnormmask    = N_VWrmsNormMask_OpenMP;
  ops->nvwrmsnorm        = N_VWrmsNorm_OpenMP;
  ops->nvmin             = N_VMin_OpenMP;
  ops->nvwl2norm         = N_VWL2Norm_OpenMP;
  ops->nvl1norm          = N_VL1Norm_OpenMP;
  ops->nvcompare         = N_VCompare_OpenMP;
  ops->nvinvtest         = N_VInvTest_OpenMP;
  ops->nvconstrmask      = N_VConstrMask_OpenMP;
  ops->nvminquotient     = N_VMinQuotient_OpenMP;

  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMP) malloc(sizeof(struct _N_VectorContent_OpenMP));
  if (content == NULL) { free(ops); free(v); return(NULL); }

  content->length      = length;
  content->num_threads = num_threads;
  content->own_data    = FALSE;   /* no data attached yet */
  content->data        = NULL;

  /* Attach content and ops */
  v->content = content;
  v->ops     = ops;

  return(v);
}

/* ----------------------------------------------------------------------------
 * 
Function to create a new vector */ N_Vector N_VNew_OpenMP(long int length, int num_threads) { N_Vector v; realtype *data; v = NULL; v = N_VNewEmpty_OpenMP(length, num_threads); if (v == NULL) return(NULL); /* Create data */ if (length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); } /* Attach data */ NV_OWN_DATA_OMP(v) = TRUE; NV_DATA_OMP(v) = data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create a vector with user data component */ N_Vector N_VMake_OpenMP(long int length, realtype *v_data, int num_threads) { N_Vector v; v = NULL; v = N_VNewEmpty_OpenMP(length, num_threads); if (v == NULL) return(NULL); if (length > 0) { /* Attach data */ NV_OWN_DATA_OMP(v) = FALSE; NV_DATA_OMP(v) = v_data; } return(v); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors. */ N_Vector *N_VCloneVectorArray_OpenMP(int count, N_Vector w) { N_Vector *vs; int j; if (count <= 0) return(NULL); vs = NULL; vs = (N_Vector *) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = NULL; vs[j] = N_VClone_OpenMP(w); if (vs[j] == NULL) { N_VDestroyVectorArray_OpenMP(vs, j-1); return(NULL); } } return(vs); } /* ---------------------------------------------------------------------------- * Function to create an array of new vectors with NULL data array. 
*/ N_Vector *N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w) { N_Vector *vs; int j; if (count <= 0) return(NULL); vs = NULL; vs = (N_Vector *) malloc(count * sizeof(N_Vector)); if(vs == NULL) return(NULL); for (j = 0; j < count; j++) { vs[j] = NULL; vs[j] = N_VCloneEmpty_OpenMP(w); if (vs[j] == NULL) { N_VDestroyVectorArray_OpenMP(vs, j-1); return(NULL); } } return(vs); } /* ---------------------------------------------------------------------------- * Function to free an array created with N_VCloneVectorArray_OpenMP */ void N_VDestroyVectorArray_OpenMP(N_Vector *vs, int count) { int j; for (j = 0; j < count; j++) N_VDestroy_OpenMP(vs[j]); free(vs); vs = NULL; return; } /* ---------------------------------------------------------------------------- * Function to return number of vector elements */ long int N_VGetLength_OpenMP(N_Vector v) { return NV_LENGTH_OMP(v); } /* ---------------------------------------------------------------------------- * Function to print a vector */ void N_VPrint_OpenMP(N_Vector x) { long int i, N; realtype *xd; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); for (i = 0; i < N; i++) { #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%11.8Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%11.8g\n", xd[i]); #else printf("%11.8g\n", xd[i]); #endif } printf("\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Create new vector from existing vector without attaching data */ N_Vector N_VCloneEmpty_OpenMP(N_Vector w) { N_Vector v; N_Vector_Ops ops; N_VectorContent_OpenMP content; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = (N_Vector) malloc(sizeof *v); if (v == NULL) return(NULL); /* Create vector operation structure */ ops = NULL; ops = (N_Vector_Ops) malloc(sizeof(struct 
_generic_N_Vector_Ops)); if (ops == NULL) { free(v); return(NULL); } ops->nvgetvectorid = w->ops->nvgetvectorid; ops->nvclone = w->ops->nvclone; ops->nvcloneempty = w->ops->nvcloneempty; ops->nvdestroy = w->ops->nvdestroy; ops->nvspace = w->ops->nvspace; ops->nvgetarraypointer = w->ops->nvgetarraypointer; ops->nvsetarraypointer = w->ops->nvsetarraypointer; ops->nvlinearsum = w->ops->nvlinearsum; ops->nvconst = w->ops->nvconst; ops->nvprod = w->ops->nvprod; ops->nvdiv = w->ops->nvdiv; ops->nvscale = w->ops->nvscale; ops->nvabs = w->ops->nvabs; ops->nvinv = w->ops->nvinv; ops->nvaddconst = w->ops->nvaddconst; ops->nvdotprod = w->ops->nvdotprod; ops->nvmaxnorm = w->ops->nvmaxnorm; ops->nvwrmsnormmask = w->ops->nvwrmsnormmask; ops->nvwrmsnorm = w->ops->nvwrmsnorm; ops->nvmin = w->ops->nvmin; ops->nvwl2norm = w->ops->nvwl2norm; ops->nvl1norm = w->ops->nvl1norm; ops->nvcompare = w->ops->nvcompare; ops->nvinvtest = w->ops->nvinvtest; ops->nvconstrmask = w->ops->nvconstrmask; ops->nvminquotient = w->ops->nvminquotient; /* Create content */ content = NULL; content = (N_VectorContent_OpenMP) malloc(sizeof(struct _N_VectorContent_OpenMP)); if (content == NULL) { free(ops); free(v); return(NULL); } content->length = NV_LENGTH_OMP(w); content->num_threads = NV_NUM_THREADS_OMP(w); content->own_data = FALSE; content->data = NULL; /* Attach content and ops */ v->content = content; v->ops = ops; return(v); } /* ---------------------------------------------------------------------------- * Create new vector from existing vector and attach data */ N_Vector N_VClone_OpenMP(N_Vector w) { N_Vector v; realtype *data; long int length; v = NULL; v = N_VCloneEmpty_OpenMP(w); if (v == NULL) return(NULL); length = NV_LENGTH_OMP(w); /* Create data */ if (length > 0) { /* Allocate memory */ data = NULL; data = (realtype *) malloc(length * sizeof(realtype)); if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); } /* Attach data */ NV_OWN_DATA_OMP(v) = TRUE; NV_DATA_OMP(v) = data; } return(v); } 
/* ----------------------------------------------------------------------------
 * Destroy vector and free vector memory.  Frees the data array only if the
 * vector owns it (own_data == TRUE), then the content, ops table, and shell.
 */

void N_VDestroy_OpenMP(N_Vector v)
{
  if (NV_OWN_DATA_OMP(v) == TRUE) {
    free(NV_DATA_OMP(v));
    NV_DATA_OMP(v) = NULL;
  }
  free(v->content); v->content = NULL;
  free(v->ops); v->ops = NULL;
  free(v); v = NULL;  /* local assignment only; has no effect for the caller */

  return;
}

/* ----------------------------------------------------------------------------
 * Get storage requirement for N_Vector: lrw = realtype words (the length),
 * liw = integer words (just the length field itself).
 */

void N_VSpace_OpenMP(N_Vector v, long int *lrw, long int *liw)
{
  *lrw = NV_LENGTH_OMP(v);
  *liw = 1;

  return;
}

/* ----------------------------------------------------------------------------
 * Get vector data pointer
 */

realtype *N_VGetArrayPointer_OpenMP(N_Vector v)
{
  return((realtype *) NV_DATA_OMP(v));
}

/* ----------------------------------------------------------------------------
 * Set vector data pointer (no-op for a length-zero vector)
 */

void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v)
{
  if (NV_LENGTH_OMP(v) > 0) NV_DATA_OMP(v) = v_data;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute linear combination z[i] = a*x[i]+b*y[i].
 *
 * Dispatches to specialized private kernels for the common coefficient
 * combinations (a or b equal to +/-1, a == b, a == -b, in-place axpy forms);
 * only the fully general case runs the loop below.  The order of the tests
 * matters: the in-place (z aliases x or y) cases must be handled first.
 */

void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  long int i, N;
  realtype c, *xd, *yd, *zd;
  N_Vector v1, v2;
  booleantype test;

  xd = yd = zd = NULL;

  if ((b == ONE) && (z == y)) {    /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMP(a,x,y);
    return;
  }

  if ((a == ONE) && (z == x)) {    /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMP(b,y,x);
    return;
  }

  /* Case: a == b == 1.0 */

  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMP(x, y, z);
    return;
  }

  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
  /* note: 'test' selects which operand is subtracted */

  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;
    v2 = test ? x : y;
    VDiff_OpenMP(v2, v1, z);
    return;
  }

  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */

  if ((test = (a == ONE)) || (b == ONE)) {
    c  = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMP(c, v1, v2, z);
    return;
  }

  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */

  if ((test = (a == -ONE)) || (b == -ONE)) {
    c  = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMP(c, v1, v2, z);
    return;
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */

  if (a == b) {
    VScaleSum_OpenMP(a, x, y, z);
    return;
  }

  /* Case: a == -b */

  if (a == -b) {
    VScaleDiff_OpenMP(a, x, y, z);
    return;
  }

  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = (a*xd[i])+(b*yd[i]);

  return;
}

/* ----------------------------------------------------------------------------
 * Assigns constant value to all vector elements, z[i] = c
 */

void N_VConst_OpenMP(realtype c, N_Vector z)
{
  long int i, N;
  realtype *zd;

  zd = NULL;

  N  = NV_LENGTH_OMP(z);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,c,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(z))
  for (i = 0; i < N; i++) zd[i] = c;

  return;
}

/* ----------------------------------------------------------------------------
 * Compute componentwise product z[i] = x[i]*y[i]
 */

void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  long int i, N;
  realtype *xd, *yd, *zd;

  xd = yd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]*yd[i];

  return;
}

/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i] */ void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z) { long int i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]/yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaler multiplication z[i] = c*x[i] */ void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z) { long int i, N; realtype *xd, *zd; xd = zd = NULL; if (z == x) { /* BLAS usage: scale x <- cx */ VScaleBy_OpenMP(c, x); return; } if (c == ONE) { VCopy_OpenMP(x, z); } else if (c == -ONE) { VNeg_OpenMP(x, z); } else { N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = c*xd[i]; } return; } /* ---------------------------------------------------------------------------- * Compute absolute value of vector components z[i] = SUNRabs(x[i]) */ void N_VAbs_OpenMP(N_Vector x, N_Vector z) { long int i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = SUNRabs(xd[i]); return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = 1 / x[i] */ void N_VInv_OpenMP(N_Vector x, N_Vector z) { long int i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = ONE/xd[i]; return; } /* 
---------------------------------------------------------------------------- * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b */ void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z) { long int i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,b,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]+b; return; } /* ---------------------------------------------------------------------------- * Computes the dot product of two vectors, a = sum(x[i]*y[i]) */ realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y) { long int i, N; realtype sum, *xd, *yd; sum = ZERO; xd = yd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); #pragma omp parallel for default(none) private(i) shared(N,xd,yd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { sum += xd[i]*yd[i]; } return(sum); } /* ---------------------------------------------------------------------------- * Computes max norm of a vector */ realtype N_VMaxNorm_OpenMP(N_Vector x) { long int i, N; realtype tmax, max, *xd; max = ZERO; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); #pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \ num_threads(NV_NUM_THREADS_OMP(x)) { tmax = ZERO; #pragma omp for schedule(static) for (i = 0; i < N; i++) { if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]); } #pragma omp critical { if (tmax > max) max = tmax; } } return(max); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a vector */ realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w) { long int i, N; realtype sum, *xd, *wd; sum = ZERO; xd = wd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); wd = NV_DATA_OMP(w); #pragma omp parallel for default(none) private(i) shared(N,xd,wd) \ 
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { sum += SUNSQR(xd[i]*wd[i]); } return(SUNRsqrt(sum/N)); } /* ---------------------------------------------------------------------------- * Computes weighted root mean square norm of a masked vector */ realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id) { long int i, N; realtype sum, *xd, *wd, *idd; sum = ZERO; xd = wd = idd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); wd = NV_DATA_OMP(w); idd = NV_DATA_OMP(id); #pragma omp parallel for default(none) private(i) shared(N,xd,wd,idd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { if (idd[i] > ZERO) { sum += SUNSQR(xd[i]*wd[i]); } } return(SUNRsqrt(sum / N)); } /* ---------------------------------------------------------------------------- * Finds the minimun component of a vector */ realtype N_VMin_OpenMP(N_Vector x) { long int i, N; realtype min, *xd; realtype tmin; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); min = xd[0]; #pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \ num_threads(NV_NUM_THREADS_OMP(x)) { tmin = xd[0]; #pragma omp for schedule(static) for (i = 1; i < N; i++) { if (xd[i] < tmin) tmin = xd[i]; } if (tmin < min) { #pragma omp critical { if (tmin < min) min = tmin; } } } return(min); } /* ---------------------------------------------------------------------------- * Computes weighted L2 norm of a vector */ realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w) { long int i, N; realtype sum, *xd, *wd; sum = ZERO; xd = wd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); wd = NV_DATA_OMP(w); #pragma omp parallel for default(none) private(i) shared(N,xd,wd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { sum += SUNSQR(xd[i]*wd[i]); } return(SUNRsqrt(sum)); } /* ---------------------------------------------------------------------------- * Computes L1 norm of a vector 
*/ realtype N_VL1Norm_OpenMP(N_Vector x) { long int i, N; realtype sum, *xd; sum = ZERO; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); #pragma omp parallel for default(none) private(i) shared(N,xd) \ reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i<N; i++) sum += SUNRabs(xd[i]); return(sum); } /* ---------------------------------------------------------------------------- * Compare vector component values to a scaler */ void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z) { long int i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { zd[i] = (SUNRabs(xd[i]) >= c) ? ONE : ZERO; } return; } /* ---------------------------------------------------------------------------- * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO */ booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z) { long int i, N; realtype *xd, *zd, val; xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); val = ZERO; #pragma omp parallel for default(none) private(i) shared(N,val,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { if (xd[i] == ZERO) val = ONE; else zd[i] = ONE/xd[i]; } if (val > ZERO) return (FALSE); else return (TRUE); } /* ---------------------------------------------------------------------------- * Compute constraint mask of a vector */ booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m) { long int i, N; realtype temp; realtype *cd, *xd, *md; cd = xd = md = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); cd = NV_DATA_OMP(c); md = NV_DATA_OMP(m); temp = ONE; #pragma omp parallel for default(none) private(i) shared(N,xd,cd,md,temp) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) { md[i] = ZERO; if (cd[i] == 
ZERO) continue; if (cd[i] > ONEPT5 || cd[i] < -ONEPT5) { if ( xd[i]*cd[i] <= ZERO) { temp = ZERO; md[i] = ONE; } continue; } if ( cd[i] > HALF || cd[i] < -HALF) { if (xd[i]*cd[i] < ZERO ) { temp = ZERO; md[i] = ONE; } } } if (temp == ONE) return (TRUE); else return(FALSE); } /* ---------------------------------------------------------------------------- * Compute minimum componentwise quotient */ realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom) { long int i, N; realtype *nd, *dd, min, tmin, val; nd = dd = NULL; N = NV_LENGTH_OMP(num); nd = NV_DATA_OMP(num); dd = NV_DATA_OMP(denom); min = BIG_REAL; #pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \ num_threads(NV_NUM_THREADS_OMP(num)) { tmin = BIG_REAL; #pragma omp for schedule(static) for (i = 0; i < N; i++) { if (dd[i] != ZERO) { val = nd[i]/dd[i]; if (val < tmin) tmin = val; } } if (tmin < min) { #pragma omp critical { if (tmin < min) min = tmin; } } } return(min); } /* * ----------------------------------------------------------------- * private functions * ----------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- * Copy vector components into a second vector */ static void VCopy_OpenMP(N_Vector x, N_Vector z) { long int i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector sum */ static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z) { long int i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ 
num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]+yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector difference */ static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z) { long int i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = xd[i]-yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute the negative of a vector */ static void VNeg_OpenMP(N_Vector x, N_Vector z) { long int i, N; realtype *xd, *zd; xd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = -xd[i]; return; } /* ---------------------------------------------------------------------------- * Compute scaled vector sum */ static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z) { long int i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = c*(xd[i]+yd[i]); return; } /* ---------------------------------------------------------------------------- * Compute scaled vector difference */ static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z) { long int i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \ 
num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = c*(xd[i]-yd[i]); return; } /* ---------------------------------------------------------------------------- * Compute vector sum z[i] = a*x[i]+y[i] */ static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z) { long int i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = (a*xd[i])+yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute vector difference z[i] = a*x[i]-y[i] */ static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z) { long int i, N; realtype *xd, *yd, *zd; xd = yd = zd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); zd = NV_DATA_OMP(z); #pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) zd[i] = (a*xd[i])-yd[i]; return; } /* ---------------------------------------------------------------------------- * Compute special cases of linear sum */ static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y) { long int i, N; realtype *xd, *yd; xd = yd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); yd = NV_DATA_OMP(y); if (a == ONE) { #pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) yd[i] += xd[i]; return; } if (a == -ONE) { #pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) yd[i] -= xd[i]; return; } #pragma omp parallel for default(none) private(i) shared(N,a,xd,yd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) yd[i] += a*xd[i]; return; } 
/* ---------------------------------------------------------------------------- * Compute scaled vector x[i] = a*x[i] */ static void VScaleBy_OpenMP(realtype a, N_Vector x) { long int i, N; realtype *xd; xd = NULL; N = NV_LENGTH_OMP(x); xd = NV_DATA_OMP(x); #pragma omp parallel for default(none) private(i) shared(N,a,xd) schedule(static) \ num_threads(NV_NUM_THREADS_OMP(x)) for (i = 0; i < N; i++) xd[i] *= a; return; }
linalg.h
/* Software SPAMS v2.1 - Copyright 2009-2011 Julien Mairal * * This file is part of SPAMS. * * SPAMS is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * SPAMS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with SPAMS. If not, see <http://www.gnu.org/licenses/>. */ /* \file * toolbox Linalg * * by Julien Mairal * julien.mairal@inria.fr * * File linalg.h * \brief Contains Matrix, Vector classes */ #ifndef LINALG_H #define LINALG_H #include "misc.h" #ifdef USE_BLAS_LIB #include "cblas_alt_template.h" #else #include "cblas_template.h" // this is obsolete #endif #include <fstream> #ifdef WINDOWS #include <string> #else #include <cstring> #endif #include <list> #include <vector> #ifdef NEW_MATLAB typedef ptrdiff_t INTT; #else typedef int INTT; #endif #include <utils.h> #undef max #undef min /// Dense Matrix class template<typename T> class Matrix; /// Sparse Matrix class template<typename T> class SpMatrix; /// Dense Vector class template<typename T> class Vector; /// Sparse Vector class template<typename T> class SpVector; typedef std::list< int > group; typedef std::list< group > list_groups; typedef std::vector< group > vector_groups; template <typename T> static inline bool isZero(const T lambda) { return static_cast<double>(abs<T>(lambda)) < 1e-99; } template <typename T> static inline bool isEqual(const T lambda1, const T lambda2) { return static_cast<double>(abs<T>(lambda1-lambda2)) < 1e-99; } template <typename T> static inline T softThrs(const T x, const T lambda) { if (x > lambda) { return x-lambda; } else if (x < -lambda) { 
return x+lambda; } else { return 0; } }; template <typename T> static inline T hardThrs(const T x, const T lambda) { return (x > lambda || x < -lambda) ? x : 0; }; template <typename T> static inline T alt_log(const T x); template <> inline double alt_log<double>(const double x) { return log(x); }; template <> inline float alt_log<float>(const float x) { return logf(x); }; template <typename T> static inline T xlogx(const T x) { if (x < -1e-20) { return INFINITY; } else if (x < 1e-20) { return 0; } else { return x*alt_log<T>(x); } } template <typename T> static inline T logexp(const T x) { if (x < -30) { return 0; } else if (x < 30) { return alt_log<T>( T(1.0) + exp_alt<T>( x ) ); } else { return x; } } /// Data class, abstract class, useful in the class image. template <typename T> class Data { public: virtual void getData(Vector<T>& data, const int i) const = 0; virtual void getGroup(Matrix<T>& data, const vector_groups& groups, const int i) const = 0; virtual inline T operator[](const int index) const = 0; virtual int n() const = 0; virtual int m() const = 0; virtual int V() const = 0; virtual void norm_2sq_cols(Vector<T>& norms) const { }; virtual ~Data() { }; }; /// Abstract matrix class template <typename T> class AbstractMatrixB { public: virtual int n() const = 0; virtual int m() const = 0; /// b <- alpha A'x + beta b virtual void multTrans(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const = 0; /// perform b = alpha*A*x + beta*b, when x is sparse virtual void mult(const SpVector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const = 0; virtual void mult(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const = 0; /// perform C = a*A*B + b*C, possibly transposing A or B. 
virtual void mult(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const = 0; virtual void mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const = 0; /// perform C = a*B*A + b*C, possibly transposing A or B. virtual void multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const = 0; /// XtX = A'*A virtual void XtX(Matrix<T>& XtX) const = 0; virtual void copyRow(const int i, Vector<T>& x) const = 0; virtual void copyTo(Matrix<T>& copy) const = 0; virtual T dot(const Matrix<T>& x) const = 0; virtual void print(const string& name) const = 0; virtual ~AbstractMatrixB() { }; }; /// Abstract matrix class template <typename T> class AbstractMatrix { public: virtual int n() const = 0; virtual int m() const = 0; /// copy X(:,i) into Xi virtual void copyCol(const int i, Vector<T>& Xi) const = 0; /// compute X(:,i)<- X(:,i)+a*col; virtual void add_rawCol(const int i, T* col, const T a) const = 0; /// copy X(:,i) into Xi virtual void extract_rawCol(const int i,T* Xi) const = 0; /// extract diagonal virtual void diag(Vector<T>& diag) const = 0; //// extract X(index1,index2) virtual inline T operator()(const int index1, const int index2) const = 0; virtual ~AbstractMatrix() { }; }; /// Class Matrix template<typename T> class Matrix : public Data<T>, public AbstractMatrix<T>, public AbstractMatrixB<T> { friend class SpMatrix<T>; public: /// Constructor with existing data X of an m x n matrix Matrix(T* X, int m, int n); /// Constructor for a new m x n matrix Matrix(int m, int n); /// Empty constructor Matrix(); /// Destructor virtual ~Matrix(); /// Accessors /// Number of rows inline int m() const { return _m; }; /// Number of columns inline int n() const { return _n; }; /// Return a modifiable reference to X(i,j) inline T& operator()(const int i, 
const int j);
/// Return the value X(i,j)
inline T operator()(const int i, const int j) const;
/// Return a modifiable reference to X(i) (1D indexing)
inline T& operator[](const int index) { return _X[index]; };
/// Return the value X(i) (1D indexing)
inline T operator[](const int index) const { return _X[index]; };
/// Copy the column i into x
inline void copyCol(const int i, Vector<T>& x) const;
/// Copy the row i into x
inline void copyRow(const int i, Vector<T>& x) const;
/// Copy the column i into the raw buffer x
inline void extract_rawCol(const int i, T* x) const;
/// DtXi <- DtXi + a * column i
virtual void add_rawCol(const int i, T* DtXi, const T a) const;
/// Copy the column i into data (Data<T> interface)
inline void getData(Vector<T>& data, const int i) const;
/// extract the group i
virtual void getGroup(Matrix<T>& data, const vector_groups& groups, const int i) const;
/// Reference the column i into the vector x (no copy)
inline void refCol(int i, Vector<T>& x) const;
/// Reference the columns i to i+n into the Matrix mat (no copy)
inline void refSubMat(int i, int n, Matrix<T>& mat) const;
/// extract a sub-matrix of a symmetric matrix
inline void subMatrixSym(const Vector<int>& indices, Matrix<T>& subMatrix) const;
/// return a modifiable pointer to the data, DANGEROUS
inline T* rawX() const { return _X; };
/// return a non-modifiable pointer to the data
inline const T* X() const { return _X; };
/// make a copy of the matrix mat in the current matrix
inline void copy(const Matrix<T>& mat);
/// copy the current matrix into mat
inline void copyTo(Matrix<T>& mat) const { mat.copy(*this); };
/// reference the data of mat in the current matrix (no copy)
inline void copyRef(const Matrix<T>& mat);

/// Debugging function
/// Print the matrix to std::cout
inline void print(const string& name) const;

/// Modifiers
/// clean a dictionary matrix
inline void clean();
/// Resize the matrix
inline void resize(int m, int n);
/// Change the data in the matrix
inline void setData(T* X, int m, int n);
/// modify _m
inline void setm(const int m) { _m = m; }; //DANGEROUS
/// modify _n
inline void setn(const int n) { _n = n; }; //DANGEROUS
/// Set all the values to zero
inline void setZeros();
/// Set all the values to a scalar
inline void set(const T a);
/// Clear the matrix
inline void clear();
/// Put white Gaussian noise in the matrix
inline void setAleat();
/// set the matrix to the identity;
inline void eye();
/// Normalize all columns to unit l2 norm
inline void normalize();
/// Normalize all columns which l2 norm is greater than one.
inline void normalize2();
/// center the columns of the matrix
inline void center();
/// center the rows of the matrix
inline void center_rows();
/// center the columns of the matrix and keep the center values
inline void center(Vector<T>& centers);
/// scale the matrix by the a
inline void scal(const T a);
/// make the matrix symmetric by copying the upper-right part
/// into the lower-left part
inline void fillSymmetric();
inline void fillSymmetric2();
/// change artificially the size of the matrix, DANGEROUS
inline void fakeSize(const int m, const int n) { _n = n; _m=m;};
/// whiten
inline void whiten(const int V);
/// whiten
inline void whiten(Vector<T>& mean, const bool pattern = false);
/// whiten
inline void whiten(Vector<T>& mean, const Vector<T>& mask);
/// undo whitening
inline void unwhiten(Vector<T>& mean, const bool pattern = false);
/// sum the columns into sum
inline void sum_cols(Vector<T>& sum) const;

/// Analysis functions
/// Check whether the columns of the matrix are normalized or not
inline bool isNormalized() const;
/// return the 1D-index of the value of greatest magnitude
inline int fmax() const;
/// return the value of greatest magnitude
inline T fmaxval() const;
/// return the 1D-index of the value of lowest magnitude
inline int fmin() const;

// Algebraic operations
/// Transpose the current matrix and put the result in the matrix
/// trans
inline void transpose(Matrix<T>& trans);
/// A <- -A
inline void neg();
/// add one to the diagonal
inline void incrDiag();
inline void addDiag(const Vector<T>& diag);
inline void addDiag(const T diag);
inline void addToCols(const Vector<T>& diag);
inline void addVecToCols(const Vector<T>& diag, const T a = 1.0);
/// perform a rank one approximation uv' using the power method
/// u0 is an initial guess for u (can be empty).
inline void svdRankOne(const Vector<T>& u0, Vector<T>& u, Vector<T>& v) const;
inline void singularValues(Vector<T>& u) const;
inline void svd(Matrix<T>& U, Vector<T>& S, Matrix<T>&V) const;
/// find the eigenvector corresponding to the largest eigenvalue
/// when the current matrix is symmetric. u0 is the initial guess.
/// using two iterations of the power method
inline void eigLargestSymApprox(const Vector<T>& u0, Vector<T>& u) const;
/// find the eigenvector corresponding to the eigenvalue with the
/// largest magnitude when the current matrix is symmetric,
/// using the power method. It
/// returns the eigenvalue. u0 is an initial guess for the
/// eigenvector.
inline T eigLargestMagnSym(const Vector<T>& u0, Vector<T>& u) const;
/// returns the value of the eigenvalue with the largest magnitude
/// using the power iteration.
inline T eigLargestMagnSym() const;
/// inverse the matrix when it is symmetric
inline void invSym();
/// perform b = alpha*A'x + beta*b
inline void multTrans(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
/// perform b = A'x, restricted to the entries flagged in active
inline void multTrans(const Vector<T>& x, Vector<T>& b, const Vector<bool>& active) const;
/// perform b = A'x, when x is sparse
inline void multTrans(const SpVector<T>& x, Vector<T>& b, const T alpha =1.0, const T beta = 0.0) const;
/// perform b = alpha*A*x+beta*b
inline void mult(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
/// perform b = alpha*A*x + beta*b, when x is sparse
inline void mult(const SpVector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
/// perform C = a*A*B + b*C, possibly transposing A or B.
inline void mult(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
/// perform C = a*B*A + b*C, possibly transposing A or B.
inline void multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
/// perform C = A*B, when B is sparse
inline void mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
/// mult by a diagonal matrix on the left
inline void multDiagLeft(const Vector<T>& diag);
/// mult by a diagonal matrix on the right
inline void multDiagRight(const Vector<T>& diag);
/// C = A .* B, elementwise multiplication
inline void mult_elementWise(const Matrix<T>& B, Matrix<T>& C) const;
/// C = A ./ B, elementwise division
inline void div_elementWise(const Matrix<T>& B, Matrix<T>& C) const;
/// XtX = A'*A
inline void XtX(Matrix<T>& XtX) const;
/// XXt = A*A'
inline void XXt(Matrix<T>& XXt) const;
/// XXt = A*A' where A is an upper triangular matrix
inline void upperTriXXt(Matrix<T>& XXt, const int L) const;
/// extract the diagonal
inline void diag(Vector<T>& d) const;
/// set the diagonal
inline void setDiag(const Vector<T>& d);
/// set the diagonal
inline void setDiag(const T val);
/// each element of the matrix is replaced by its exponential
inline void exp();
/// each element of the matrix is replaced by its square root
inline void Sqrt();
inline void Invsqrt();
/// return vec1'*A*vec2, where vec2 is sparse
inline T quad(const Vector<T>& vec1, const SpVector<T>& vec2) const;
/// y = a*(vec1'*A*vec2) + b*y, where vec2 is sparse
inline void quad_mult(const Vector<T>& vec1, const SpVector<T>& vec2, Vector<T>& y, const T a = 1.0, const T b = 0.0) const;
/// return vec'*A*vec when vec is sparse
inline T quad(const SpVector<T>& vec) const;
/// add alpha*mat to the current matrix
inline void add(const Matrix<T>& mat, const T alpha = 1.0);
/// add alpha to the current matrix
inline void add(const T alpha);
/// return the dot product <A, mat>
inline T dot(const Matrix<T>& mat) const;
/// subtract the matrix mat from the current matrix
inline void sub(const Matrix<T>& mat);
/// inverse the elements of the matrix
inline void inv_elem();
/// inverse the elements of the matrix
inline void inv() { this->inv_elem(); };
/// return the trace of the matrix
inline T trace() const;
/// compute the sum of the magnitude of the matrix values
inline T asum() const;
/// return ||A||_F
inline T normF() const;
/// mean of the entries (NOTE(review): original comment said "whiten"; confirm against implementation)
inline T mean() const;
/// return ||A||_F^2
inline T normFsq() const;
/// return ||A||_F^2
inline T nrm2sq() const { return this->normFsq(); };
/// return ||At||_{inf,2} (max of l2 norm of the columns)
inline T norm_inf_2_col() const;
/// return ||At||_{1,2} (sum of the l2 norms of the columns)
inline T norm_1_2_col() const;
/// returns the l2 norms of the columns
inline void norm_2_cols(Vector<T>& norms) const;
/// returns the l2 norms of the rows
inline void norm_2_rows(Vector<T>& norms) const;
/// returns the linf norms of the columns
inline void norm_inf_cols(Vector<T>& norms) const;
/// returns the linf norms of the rows
inline void norm_inf_rows(Vector<T>& norms) const;
/// returns the l1 norms of the rows
inline void norm_l1_rows(Vector<T>& norms) const;
/// returns the l2 norms ^2 of the columns
inline void norm_2sq_cols(Vector<T>& norms) const;
/// returns the l2 norms ^2 of the rows
inline void norm_2sq_rows(Vector<T>& norms) const;
inline void thrsmax(const T nu);
inline void thrsmin(const T nu);
inline void thrsabsmin(const T nu);
/// perform soft-thresholding of the matrix, with the threshold nu
inline void softThrshold(const T nu);
inline void hardThrshold(const T nu);
/// keep the positive parts (presumably A <- max(A,0) -- confirm against implementation)
inline void thrsPos();
/// perform A <- A + alpha*vec1*vec2'
inline void rank1Update(const Vector<T>& vec1, const Vector<T>& vec2, const T alpha = 1.0);
/// perform A <- A + alpha*vec1*vec2', when vec1 is sparse
inline void rank1Update(const SpVector<T>& vec1, const Vector<T>& vec2, const T alpha = 1.0);
/// perform A <- A + alpha*vec1*vec2', when vec2 is sparse
inline void rank1Update(const Vector<T>& vec1, const SpVector<T>& vec2, const T alpha = 1.0);
inline void rank1Update_mult(const Vector<T>& vec1, const Vector<T>& vec1b, const SpVector<T>& vec2, const T alpha = 1.0);
/// perform A <- A + alpha*vec*vec', when vec is sparse
inline void rank1Update(const SpVector<T>& vec, const T alpha = 1.0);
/// perform A <- A + alpha*vec*vec2', when both are sparse
inline void rank1Update(const SpVector<T>& vec, const SpVector<T>& vec2, const T alpha = 1.0);
/// Compute the mean of the columns
inline void meanCol(Vector<T>& mean) const;
/// Compute the mean of the rows
inline void meanRow(Vector<T>& mean) const;
/// fill the matrix with the row given
inline void fillRow(const Vector<T>& row);
/// extract the row i into row
inline void extractRow(const int i, Vector<T>& row) const;
inline void setRow(const int i, const Vector<T>& row);
inline void addRow(const int i, const Vector<T>& row, const T a=1.0);
/// compute x, such that b = Ax, WARNING this function needs to be u
/// updated
inline void conjugateGradient(const Vector<T>& b, Vector<T>& x, const T tol = 1e-4, const int = 4) const;
/// compute x, such that b = Ax, WARNING this function needs to be u
/// updated, the temporary vectors are given.
inline void drop(char* fileName) const;
/// compute a Nadaraya Watson estimator
inline void NadarayaWatson(const Vector<int>& ind, const T sigma);
/// perform block soft-thresholding (groups of size sizeGroup)
inline void blockThrshold(const T nu, const int sizeGroup);
/// performs sparse projections of the columns
inline void sparseProject(Matrix<T>& out, const T thrs, const int mode = 1, const T lambda1 = 0, const T lambda2 = 0, const T lambda3 = 0, const bool pos = false, const int numThreads=-1);
inline void transformFilter();

/// Conversion
/// make a sparse copy of the current matrix
inline void toSparse(SpMatrix<T>& matrix) const;
/// make a sparse copy of the transpose of the current matrix
inline void toSparseTrans(SpMatrix<T>& matrixTrans);
/// make a reference of the matrix to a vector vec
inline void toVect(Vector<T>& vec) const;
/// Accessor
inline int V() const { return 1;};
/// merge two dictionaries
inline void merge(const Matrix<T>& B, Matrix<T>& C) const;
/// extract the rows of a matrix corresponding to a binary mask
inline void copyMask(Matrix<T>& out, Vector<bool>& mask) const;

protected:
/// Forbid lazy copies
explicit Matrix<T>(const Matrix<T>& matrix);
/// Forbid lazy copies
Matrix<T>& operator=(const Matrix<T>& matrix);

/// is the data allocation external or not
bool _externAlloc;
/// pointer to the data
T* _X;
/// number of rows
int _m;
/// number of columns
int _n;
};

/// Class for dense vector
template<typename T> class Vector {
   friend class SpMatrix<T>;
   friend class Matrix<T>;
   friend class SpVector<T>;
   public:
   /// Empty constructor
   Vector();
   /// Constructor. Create a new vector of size n
   Vector(int n);
   /// Constructor with existing data
   Vector(T* X, int n);
   /// Copy constructor
   explicit Vector<T>(const Vector<T>& vec);
   /// Destructor
   virtual ~Vector();

   /// Accessors
   /// Print the vector to std::cout
   inline void print(const char* name) const;
   /// returns the index of the largest value
   inline int max() const;
   /// returns the index of the minimum value
   inline int min() const;
   /// returns the maximum value
   inline T maxval() const;
   /// returns the minimum value
   inline T minval() const;
   /// returns the index of the value with largest magnitude
   inline int fmax() const;
   /// returns the index of the value with smallest magnitude
   inline int fmin() const;
   /// returns the maximum magnitude
   inline T fmaxval() const;
   /// returns the minimum magnitude
   inline T fminval() const;
   /// returns a reference to X[index]
   inline T& operator[](const int index);
   /// returns X[index]
   inline T operator[](const int index) const;
   /// make a copy of x
   inline void copy(const Vector<T>& x);
   /// returns the size of the vector
   inline int n() const { return _n; };
   /// returns a modifiable reference of the data, DANGEROUS
   inline T* rawX() const { return _X; };
   /// change artificially the size of the vector, DANGEROUS
   inline void fakeSize(const int n) { _n = n; };
   /// generate logarithmically spaced values
   inline void logspace(const int n, const T a, const T b);
   /// number of nonzero entries
   inline int nnz() const;

   /// Modifiers
   /// Set all values to zero
   inline void setZeros();
   /// resize the vector
   inline void resize(const int n);
   /// change the data of the vector
   inline void setPointer(T* X, const int n);
   inline void setData(T* X, const int n) { this->setPointer(X,n); };
   /// put a random permutation of size n (for integral vectors)
   inline void randperm(int n);
   /// put random values in the vector (White Gaussian Noise)
   inline void setAleat();
   /// clear the vector
   inline void clear();
   /// performs soft-thresholding of the vector
   inline void softThrshold(const T nu);
   /// performs hard-thresholding of the vector
   inline void hardThrshold(const T nu);
   inline void thrsmax(const T nu);
   inline void thrsmin(const T nu);
   inline void thrsabsmin(const T nu);
   /// performs thresholding of the vector
   inline void thrshold(const T nu);
   /// keeps only the positive parts of the vector
   inline void thrsPos();
   /// set each value of the vector to val
   inline void set(const T val);
   inline void setn(const int n) { _n = n; }; //DANGEROUS
   inline bool alltrue() const;
   inline bool allfalse() const;

   /// Algebraic operations
   /// returns ||A||_2
   inline T nrm2() const;
   /// returns ||A||_2^2
   inline T nrm2sq() const;
   /// returns A'x
   inline T dot(const Vector<T>& x) const;
   /// returns A'x, when x is sparse
   inline T dot(const SpVector<T>& x) const;
   /// A <- A + a*x
   inline void add(const Vector<T>& x, const T a = 1.0);
   /// A <- A + a*x
   inline void add(const SpVector<T>& x, const T a = 1.0);
   /// adds a to each value in the vector
   inline void add(const T a);
   /// A <- A - x
   inline void sub(const Vector<T>& x);
   /// A <- A - x, when x is sparse
   inline void sub(const SpVector<T>& x);
   /// A <- A ./ x
   inline void div(const Vector<T>& x);
   /// A <- x ./ y
   inline void div(const Vector<T>& x, const Vector<T>& y);
   /// A <- x .^ 2
   inline void sqr(const Vector<T>& x);
   /// A <- sqrt(x) (NOTE(review): original comment said 1./sqrt(x); confirm)
   inline void Sqrt(const Vector<T>& x);
   /// A <- sqrt(A)
   inline void Sqrt();
   /// A <- 1 ./ sqrt(x)
   inline void Invsqrt(const Vector<T>& x);
   /// A <- 1 ./ sqrt(A)
   inline void Invsqrt();
   /// A <- 1./x
   inline void inv(const Vector<T>& x);
   /// A <- 1./A
   inline void inv();
   /// A <- x .* y
   inline void mult(const Vector<T>& x, const Vector<T>& y);
   inline void mult_elementWise(const Vector<T>& B, Vector<T>& C) const { C.mult(*this,B); };
   /// normalize the vector to unit l2 norm
   inline void normalize();
   /// normalize the vector only if its l2 norm is greater than one
   inline void normalize2();
   /// whiten
   inline void whiten(Vector<T>& mean, const bool pattern = false);
   /// whiten
   inline void whiten(Vector<T>& mean, const Vector<T>& mask);
   /// whiten
   inline void whiten(const int V);
   /// mean of the entries
   inline T mean();
   /// standard deviation of the entries
   inline T std();
   /// compute the Kullback-Leibler divergence
   inline T KL(const Vector<T>& X);
   /// undo whitening
   inline void unwhiten(Vector<T>& mean, const bool pattern = false);
   /// scale the vector by a
   inline void scal(const T a);
   /// A <- -A
   inline void neg();
   /// replace each value by its exponential
   inline void exp();
   /// replace each value by its logarithm
   inline void log();
   /// log-sum-exp style transform -- TODO confirm against implementation
   inline void logexp();
   inline T softmax(const int y);
   /// computes the sum of the magnitudes of the vector
   inline T asum() const;
   inline T lzero() const;
   /// compute the sum of the differences
   inline T afused() const;
   /// returns the sum of the vector
   inline T sum() const;
   /// puts in signs, the sign of each point in the vector
   inline void sign(Vector<T>& signs) const;
   /// projects the vector onto the l1 ball of radius thrs,
   /// returns true if the returned vector is null
   inline void l1project(Vector<T>& out, const T thrs, const bool simplex = false) const;
   inline void l1project_weighted(Vector<T>& out, const Vector<T>& weights, const T thrs, const bool residual = false) const;
   inline void l1l2projectb(Vector<T>& out, const T thrs, const T gamma, const bool pos = false, const int mode = 1);
   inline void sparseProject(Vector<T>& out, const T thrs, const int mode = 1, const T lambda1 = 0, const T lambda2 = 0, const T lambda3 = 0, const bool pos = false);
   inline void project_sft(const Vector<int>& labels, const int clas);
   inline void project_sft_binary(const Vector<T>& labels);
   /// projects the vector onto the elastic-net (l1l2) ball of radius thrs
   inline void l1l2project(Vector<T>& out, const T thrs, const T gamma, const bool pos = false) const;
   inline void fusedProject(Vector<T>& out, const T lambda1, const T lambda2, const int itermax);
   inline void fusedProjectHomotopy(Vector<T>& out, const T lambda1,const T lambda2,const T lambda3 = 0, const bool penalty = true);
   /// sort the vector into out
   inline void sort(Vector<T>& out, const bool mode) const;
   /// sort the vector in place
   inline void sort(const bool mode);
   //// sort the vector into out, returning the permutation in key
   inline void sort2(Vector<T>& out, Vector<int>& key, const bool mode) const;
   /// sort the vector in place, returning the permutation in key
   inline void sort2(Vector<int>& key, const bool mode);
   inline void applyBayerPattern(const int offset);

   /// Conversion
   /// make a sparse copy
   inline void toSparse(SpVector<T>& vec) const;
   /// extract the entries corresponding to a binary mask
   inline void copyMask(Vector<T>& out, Vector<bool>& mask) const;

   private:
   /// = operator, forbidden (no lazy copies)
   Vector<T>& operator=(const Vector<T>& vec);

   /// if the data has been externally allocated
   bool _externAlloc;
   /// data
   T* _X;
   /// size of the vector
   int _n;
};

/// Sparse Matrix class, CSC format
template<typename T> class SpMatrix : public Data<T>, public AbstractMatrixB<T> {
   friend class Matrix<T>;
   friend class SpVector<T>;
   public:
   /// Constructor, CSC format, existing data
   SpMatrix(T* v, int* r, int* pB, int* pE, int m, int n, int nzmax);
   /// Constructor, new m x n matrix, with at most nzmax non-zeros values
   SpMatrix(int m, int n, int nzmax);
   /// Empty constructor
   SpMatrix();
   /// Destructor
   ~SpMatrix();

   /// Accessors
   /// reference the column i into vec
   inline void refCol(int i, SpVector<T>& vec) const;
   /// returns pB[i]
   inline int pB(const int i) const { return _pB[i]; };
   /// returns r[i]
   inline int r(const int i) const { return _r[i]; };
   /// returns v[i]
   inline T v(const int i) const { return _v[i]; };
   /// returns the maximum number of non-zero elements
   inline int nzmax() const { return _nzmax; };
   /// returns the number of columns (stored in _n)
   inline int n() const { return _n; };
   /// returns the number of rows (stored in _m)
   inline int m() const { return _m; };
   /// number of views (Data<T> interface)
   inline int V() const { return 1; };
   /// returns X[index]
   inline
T operator[](const int index) const;
   void getData(Vector<T>& data, const int index) const;
   void getGroup(Matrix<T>& data, const vector_groups& groups, const int i) const ;
   /// print the sparse matrix
   inline void print(const string& name) const;
   /// compute the sum of the magnitudes of the matrix elements
   inline T asum() const;
   /// compute the squared Frobenius norm
   inline T normFsq() const;
   /// Direct access to _pB
   inline int* pB() const { return _pB; };
   /// Direct access to _pE
   inline int* pE() const { return _pE; };
   /// Direct access to _r
   inline int* r() const { return _r; };
   /// Direct access to _v
   inline T* v() const { return _v; };
   /// number of nonzeros elements
   inline int nnz() const { return _pB[_n]; };
   inline void add_direct(const SpMatrix<T>& mat, const T a);
   inline void copy_direct(const SpMatrix<T>& mat);
   inline T dot_direct(const SpMatrix<T>& mat) const;

   /// Modifiers
   /// clear the matrix
   inline void clear();
   /// resize the matrix
   inline void resize(const int m, const int n, const int nzmax);
   /// scale the matrix by a
   inline void scal(const T a) const;

   /// Algebraic operations
   /// aat <- A*A'
   inline void AAt(Matrix<T>& aat) const;
   /// aat <- A(:,indices)*A(:,indices)'
   inline void AAt(Matrix<T>& aat, const Vector<int>& indices) const;
   /// aat <- sum_i w_i A(:,i)*A(:,i)'
   inline void wAAt(const Vector<T>& w, Matrix<T>& aat) const;
   /// XAt <- X*A'
   inline void XAt(const Matrix<T>& X, Matrix<T>& XAt) const;
   /// XAt <- X(:,indices)*A(:,indices)'
   inline void XAt(const Matrix<T>& X, Matrix<T>& XAt, const Vector<int>& indices) const;
   /// XAt <- sum_i w_i X(:,i)*A(:,i)'
   inline void wXAt( const Vector<T>& w, const Matrix<T>& X, Matrix<T>& XAt, const int numthreads=-1) const;
   inline void XtX(Matrix<T>& XtX) const;
   /// y <- A'*x
   inline void multTrans(const Vector<T>& x, Vector<T>& y, const T alpha = 1.0, const T beta = 0.0) const;
   inline void multTrans(const SpVector<T>& x, Vector<T>& y, const T alpha = 1.0, const T beta = 0.0) const;
   /// perform b = alpha*A*x + beta*b, when x is sparse
   inline void mult(const SpVector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
   /// perform b = alpha*A*x + beta*b
   inline void mult(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
   /// perform C = a*A*B + b*C, possibly transposing A or B.
   inline void mult(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
   /// perform C = a*B*A + b*C, possibly transposing A or B.
   inline void multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
   /// perform C = a*A*B + b*C, when B is sparse
   inline void mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
   /// make a dense copy of the current matrix in mat
   inline void copyTo(Matrix<T>& mat) const { this->toFull(mat); };
   /// dot product;
   inline T dot(const Matrix<T>& x) const;
   inline void copyRow(const int i, Vector<T>& x) const;
   inline void sum_cols(Vector<T>& sum) const;
   inline void copy(const SpMatrix<T>& mat);

   /// Conversions
   /// copy the sparse matrix into a dense matrix
   inline void toFull(Matrix<T>& matrix) const;
   /// copy the sparse matrix into a dense transposed matrix
   inline void toFullTrans(Matrix<T>& matrix) const;
   /// use the data from v, r for _v, _r
   inline void convert(const Matrix<T>&v, const Matrix<int>& r, const int K);
   /// use the data from v, r for _v, _r
   inline void convert2(const Matrix<T>&v, const Vector<int>& r, const int K);
   /// returns the l2 norms ^2 of the columns
   inline void norm_2sq_cols(Vector<T>& norms) const;
   /// returns the l0 norms of the columns
   inline void norm_0_cols(Vector<T>& norms) const;
   /// returns the l1 norms of the columns
   inline void norm_1_cols(Vector<T>& norms) const;
   inline void addVecToCols(const Vector<T>& diag, const T a = 1.0);
   inline void addVecToColsWeighted(const Vector<T>& diag, const T* weights, const T a = 1.0);

   private:
   /// forbid copy constructor
   explicit SpMatrix(const SpMatrix<T>& matrix);
   SpMatrix<T>& operator=(const SpMatrix<T>& matrix);

   /// if the data has been externally allocated
   bool _externAlloc;
   /// data
   T* _v;
   /// row indices
   int* _r;
   /// indices of the beginning of columns
   int* _pB;
   /// indices of the end of columns
   int* _pE;
   /// number of rows
   int _m;
   /// number of columns
   int _n;
   /// number of non-zero values
   int _nzmax;
};

/// Sparse vector class
template <typename T> class SpVector {
   friend class Matrix<T>;
   friend class SpMatrix<T>;
   friend class Vector<T>;
   public:
   /// Constructor, of the sparse vector of size L.
   SpVector(T* v, int* r, int L, int nzmax);
   /// Constructor, allocates nzmax slots
   SpVector(int nzmax);
   /// Empty constructor
   SpVector();
   /// Destructor
   ~SpVector();

   /// Accessors
   /// returns the capacity of the vector (NOTE(review): declared to return T, not int)
   inline T nzmax() const { return _nzmax; };
   /// returns the length of the vector
   inline T length() const { return _L; };
   /// computes the sum of the magnitude of the elements
   inline T asum() const;
   /// computes the l2 norm ^2 of the vector
   inline T nrm2sq() const;
   /// computes the l2 norm of the vector
   inline T nrm2() const;
   /// computes the linf norm of the vector
   inline T fmaxval() const;
   /// print the vector to std::cerr
   inline void print(const string& name) const;
   /// create a reference on the vector r
   inline void refIndices(Vector<int>& indices) const;
   /// creates a reference on the vector val
   inline void refVal(Vector<T>& val) const;
   /// access table r
   inline int r(const int i) const { return _r[i]; };
   /// access table v
   inline T v(const int i) const { return _v[i]; };
   inline T* rawX() const { return _v; };
   /// number of stored entries
   inline int L() const { return _L; };
   /// set the number of stored entries
   inline void setL(const int L) { _L=L; };
   /// a <- a.^2
   inline void sqr();
   /// dot product
   inline T dot(const SpVector<T>& vec) const;

   /// Modifiers
   /// clears the vector
   inline void clear();
   /// resizes the vector
   inline void resize(const int nzmax);

   /// copy the vector into a sparse matrix of size m x n
   void inline toSpMatrix(SpMatrix<T>& out, const int m, const int n) const;
   /// copy the vector into a dense vector
   void inline toFull(Vector<T>& out) const;

   private:
   /// forbids lazy copies
   explicit SpVector(const SpVector<T>& vector);
   SpVector<T>& operator=(const SpVector<T>& vector);

   /// external allocation
   bool _externAlloc;
   /// data
   T* _v;
   /// indices
   int* _r;
   /// length
   int _L;
   /// maximum number of nonzeros elements
   int _nzmax;
};

/// Class representing the product of two matrices
template<typename T> class ProdMatrix : public AbstractMatrix<T> {
   public:
   ProdMatrix();
   /// Constructor. Matrix D'*D is represented
   ProdMatrix(const Matrix<T>& D, const bool high_memory = true);
   /// Constructor. Matrix D'*X is represented
   ProdMatrix(const Matrix<T>& D, const Matrix<T>& X, const bool high_memory = true);
   /// Constructor, D'*X is represented, with optional transpositions
   /*ProdMatrix(const SpMatrix<T>& D, const Matrix<T>& X, const bool transD = false, const bool transX = false);*/
   /// Destructor
   ~ProdMatrix() { delete(_DtX);} ;
   /// set_matrices
   inline void setMatrices(const Matrix<T>& D, const bool high_memory=true);
   inline void setMatrices(const Matrix<T>& D, const Matrix<T>& X, const bool high_memory=true);
   /// compute DtX(:,i)
   inline void copyCol(const int i, Vector<T>& DtXi) const;
   /// compute DtX(:,i) into a raw buffer
   inline void extract_rawCol(const int i,T* DtXi) const;
   /// DtXi <- DtXi + a*DtX(:,i)
   virtual void add_rawCol(const int i, T* DtXi, const T a) const;
   /// add something to the diagonal
   void inline addDiag(const T diag);
   /// extract the diagonal
   void inline diag(Vector<T>& diag) const;
   /// returns the number of columns
   inline int n() const { return _n;};
   /// returns the number of rows
   inline int m() const { return _m;};
   /// returns the value of an index
   inline T operator()(const int index1, const int index2) const;
   /// returns the value of an index
   inline T operator[](const int
index) const;

   private:
   /// Depending on the mode, DtX is a matrix, or two matrices
   Matrix<T>* _DtX;
   const Matrix<T>* _X;
   const Matrix<T>* _D;
   bool _high_memory;
   int _n;
   int _m;
   T _addDiag;
};

/* ************************************
 * Implementation of the class Matrix *
 * ************************************/

/// Constructor with existing data X of an m x n matrix;
/// X stays owned by the caller (_externAlloc = true)
template <typename T> Matrix<T>::Matrix(T* X, int m, int n) :
   _externAlloc(true), _X(X), _m(m), _n(n) { };

/// Constructor for a new m x n matrix (owns its storage)
template <typename T> Matrix<T>::Matrix(int m, int n) :
   _externAlloc(false), _m(m), _n(n) {
      // allocation is serialized across OpenMP threads
#pragma omp critical
      {
         _X= new T[_n*_m];
      }
   };

/// Empty constructor
template <typename T> Matrix<T>::Matrix() :
   _externAlloc(false), _X(NULL), _m(0), _n(0) { };

/// Destructor: releases the storage unless it is externally owned (see clear())
template <typename T> Matrix<T>::~Matrix() {
   clear();
};

/// Return a modifiable reference to X(i,j); storage is column-major
template <typename T> inline T& Matrix<T>::operator()(const int i, const int j) {
   return _X[j*_m+i];
};

/// Return the value X(i,j)
template <typename T> inline T Matrix<T>::operator()(const int i, const int j) const {
   return _X[j*_m+i];
};

/// Print the matrix (name and size go to std::cerr, values to stdout)
template <typename T> inline void Matrix<T>::print(const string& name) const {
   std::cerr << name << std::endl;
   std::cerr << _m << " x " << _n << std::endl;
   for (int i = 0; i<_m; ++i) {
      for (int j = 0; j<_n; ++j) {
         printf("%10.5g ",static_cast<double>(_X[j*_m+i]));
         // std::cerr << _X[j*_m+i] << " ";
      }
      printf("\n ");
      //std::cerr << std::endl;
   }
   printf("\n ");
};

/// Copy the column i into x
template <typename T> inline void Matrix<T>::copyCol(const int i, Vector<T>& x) const {
   assert(i >= 0 && i<_n);
   x.resize(_m);
   cblas_copy<T>(_m,_X+i*_m,1,x._X,1);
};

/// Copy the row i into x (walks across columns with stride _m)
template <typename T> inline void Matrix<T>::copyRow(const int i, Vector<T>& x) const {
   assert(i >= 0 && i<_m);
   x.resize(_n);
   cblas_copy<T>(_n,_X+i,_m,x._X,1);
};

/// Copy the column i into the raw buffer x
template <typename T> inline void Matrix<T>::extract_rawCol(const int i, T* x) const {
   assert(i >= 0 && i<_n);
   cblas_copy<T>(_m,_X+i*_m,1,x,1);
};

/// x <- x + a * column i
template <typename T> inline void Matrix<T>::add_rawCol(const int i, T* x, const T a) const {
   assert(i >= 0 && i<_n);
   cblas_axpy<T>(_m,a,_X+i*_m,1,x,1);
};

/// Copy the column i into x (Data<T> interface)
template <typename T> inline void Matrix<T>::getData(Vector<T>& x, const int i) const {
   this->copyCol(i,x);
};

/// Gather the columns listed in group i into data
template <typename T> inline void Matrix<T>::getGroup(Matrix<T>& data, const vector_groups& groups, const int i) const {
   const group& gr = groups[i];
   const int N = gr.size();
   data.resize(_m,N);
   int count=0;
   for (group::const_iterator it = gr.begin(); it != gr.end(); ++it) {
      cblas_copy<T>(_m,_X+(*it)*_m,1,data._X+count*_m,1);
      ++count;
   }
};

/// Reference the column i into the vector x (no copy; x does not own the data)
template <typename T> inline void Matrix<T>::refCol(int i, Vector<T>& x) const {
   assert(i >= 0 && i<_n);
   x.clear();
   x._X=_X+i*_m;
   x._n=_m;
   x._externAlloc=true;
};

/// Reference the columns i to i+n into the Matrix mat (no copy)
template <typename T> inline void Matrix<T>::refSubMat(int i, int n, Matrix<T>& mat) const {
   mat.setData(_X+i*_m,_m,n);
}

/// Check whether the columns of the matrix are normalized or not
template <typename T> inline bool Matrix<T>::isNormalized() const {
   for (int i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (fabs(norm - 1.0) > 1e-6) return false;
   }
   return true;
};

/// clean a dictionary matrix: normalize the columns, then replace any
/// column that nearly duplicates an earlier one (Gram entry > 0.99)
/// by a fresh random unit vector
template <typename T> inline void Matrix<T>::clean() {
   this->normalize();
   Matrix<T> G;
   this->XtX(G);
   T* prG = G._X;
   /// scan strictly above the diagonal
   for (int i = 0; i<_n; ++i) {
      for (int j = i+1; j<_n; ++j) {
         if (prG[i*_n+j] > 0.99) {
            // remove nasty column j and put random values inside
            Vector<T> col;
            this->refCol(j,col);
            col.setAleat();
            col.normalize();
         }
      }
   }
};

/// return the 1D-index of the value of greatest magnitude
template <typename T> inline int Matrix<T>::fmax() const {
   return cblas_iamax<T>(_n*_m,_X,1);
};

/// return the value of greatest magnitude
template <typename T>
inline T Matrix<T>::fmaxval() const {
   return _X[cblas_iamax<T>(_n*_m,_X,1)];
};

/// return the 1D-index of the value of lowest magnitude
template <typename T> inline int Matrix<T>::fmin() const {
   return cblas_iamin<T>(_n*_m,_X,1);
};

/// extract a sub-matrix of a symmetric matrix
/// NOTE(review): indexing uses _n as the column stride; this is correct
/// only because the matrix is assumed square (_m == _n)
template <typename T> inline void Matrix<T>::subMatrixSym(
      const Vector<int>& indices, Matrix<T>& subMatrix) const {
   int L = indices.n();
   subMatrix.resize(L,L);
   T* out = subMatrix._X;
   int* rawInd = indices.rawX();
   // fill the lower triangle, then mirror it
   for (int i = 0; i<L; ++i)
      for (int j = 0; j<=i; ++j)
         out[i*L+j]=_X[rawInd[i]*_n+rawInd[j]];
   subMatrix.fillSymmetric();
};

/// Resize the matrix (no-op when the size is unchanged); content is zeroed
template <typename T> inline void Matrix<T>::resize(int m, int n) {
   if (_n==n && _m==m) return;
   clear();
   _n=n;
   _m=m;
   _externAlloc=false;
#pragma omp critical
   {
      _X=new T[_n*_m];
   }
   setZeros();
};

/// Change the data in the matrix; the caller keeps ownership of X
template <typename T> inline void Matrix<T>::setData(T* X, int m, int n) {
   clear();
   _X=X;
   _m=m;
   _n=n;
   _externAlloc=true;
};

/// Set all the values to zero
template <typename T> inline void Matrix<T>::setZeros() {
   memset(_X,0,_n*_m*sizeof(T));
};

/// Set all the values to a scalar
template <typename T> inline void Matrix<T>::set(const T a) {
   for (int i = 0; i<_n*_m; ++i) _X[i]=a;
};

/// Clear the matrix: free the data when owned, reset to an empty 0 x 0 state
template <typename T> inline void Matrix<T>::clear() {
   if (!_externAlloc) delete[](_X);
   _n=0;
   _m=0;
   _X=NULL;
   _externAlloc=true;
};

/// Put white Gaussian noise in the matrix
template <typename T> inline void Matrix<T>::setAleat() {
   for (int i = 0; i<_n*_m; ++i) _X[i]=normalDistrib<T>();
};

/// set the matrix to the identity
template <typename T> inline void Matrix<T>::eye() {
   this->setZeros();
   for (int i = 0; i<MIN(_n,_m); ++i) _X[i*_m+i] = T(1.0);
};

/// Normalize all columns to unit l2 norm; (near-)zero columns are
/// replaced by random unit vectors
template <typename T> inline void Matrix<T>::normalize() {
   //T constant = 1.0/sqrt(_m);
   for (int i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (norm > 1e-10) {
         T invNorm=1.0/norm;
         cblas_scal<T>(_m,invNorm,_X+_m*i,1);
      } else {
         // for (int j = 0; j<_m; ++j) _X[_m*i+j]=constant;
         Vector<T> d;
         this->refCol(i,d);
         d.setAleat();
         d.normalize();
      }
   }
};

/// Normalize all columns which l2 norm is greater than one.
template <typename T> inline void Matrix<T>::normalize2() {
   for (int i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (norm > 1.0) {
         T invNorm=1.0/norm;
         cblas_scal<T>(_m,invNorm,_X+_m*i,1);
      }
   }
};

/// center the columns of the matrix (subtract each column's mean)
template <typename T> inline void Matrix<T>::center() {
   for (int i = 0; i<_n; ++i) {
      Vector<T> col;
      this->refCol(i,col);
      T sum = col.sum();
      col.add(-sum/static_cast<T>(_m));
   }
};

/// center the rows of the matrix (subtract each row's mean)
template <typename T> inline void Matrix<T>::center_rows() {
   Vector<T> mean_rows(_m);
   mean_rows.setZeros();
   for (int i = 0; i<_n; ++i)
      for (int j = 0; j<_m; ++j)
         mean_rows[j] += _X[i*_m+j];
   mean_rows.scal(T(1.0)/_n);
   for (int i = 0; i<_n; ++i)
      for (int j = 0; j<_m; ++j)
         _X[i*_m+j] -= mean_rows[j];
};

/// center the columns of the matrix and keep the center values
template <typename T> inline void Matrix<T>::center(Vector<T>& centers) {
   centers.resize(_n);
   for (int i = 0; i<_n; ++i) {
      Vector<T> col;
      this->refCol(i,col);
      T sum = col.sum()/static_cast<T>(_m);
      centers[i]=sum;
      col.add(-sum);
   }
};

/// scale the matrix by the a
template <typename T> inline void Matrix<T>::scal(const T a) {
   cblas_scal<T>(_n*_m,a,_X,1);
};

/// make a copy of the matrix mat in the current matrix
template <typename T> inline void Matrix<T>::copy(const Matrix<T>& mat) {
   resize(mat._m,mat._n);
   // cblas_copy<T>(_m*_n,mat._X,1,_X,1);
   memcpy(_X,mat._X,_m*_n*sizeof(T));
};

/// reference the data of mat in the current matrix (no copy; mat keeps ownership)
template <typename T> inline void Matrix<T>::copyRef(const Matrix<T>& mat) {
   this->setData(mat.rawX(),mat.m(),mat.n());
};

/// make the matrix symmetric by copying the upper-right part
/// into the lower-left part
template <typename T> inline void Matrix<T>::fillSymmetric() {
   for (int i = 0; i<_n; ++i) {
      for (int j =0; j<i; ++j) {
_X[j*_m+i]=_X[i*_m+j]; } } }; template <typename T> inline void Matrix<T>::fillSymmetric2() { for (int i = 0; i<_n; ++i) { for (int j =0; j<i; ++j) { _X[i*_m+j]=_X[j*_m+i]; } } }; template <typename T> inline void Matrix<T>::whiten(const int V) { const int sizePatch=_m/V; for (int i = 0; i<_n; ++i) { for (int j = 0; j<V; ++j) { T mean = 0; for (int k = 0; k<sizePatch; ++k) { mean+=_X[i*_m+sizePatch*j+k]; } mean /= sizePatch; for (int k = 0; k<sizePatch; ++k) { _X[i*_m+sizePatch*j+k]-=mean; } } } }; template <typename T> inline void Matrix<T>::whiten(Vector<T>& mean, const bool pattern) { mean.setZeros(); if (pattern) { const int n =static_cast<int>(sqrt(static_cast<T>(_m))); int count[4]; for (int i = 0; i<4; ++i) count[i]=0; for (int i = 0; i<_n; ++i) { int offsetx=0; for (int j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; int offsety=0; for (int k = 0; k<n; ++k) { offsety= (offsety+1) % 2; mean[2*offsetx+offsety]+=_X[i*_m+j*n+k]; count[2*offsetx+offsety]++; } } } for (int i = 0; i<4; ++i) mean[i] /= count[i]; for (int i = 0; i<_n; ++i) { int offsetx=0; for (int j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; int offsety=0; for (int k = 0; k<n; ++k) { offsety= (offsety+1) % 2; _X[i*_m+j*n+k]-=mean[2*offsetx+offsety]; } } } } else { const int V = mean.n(); const int sizePatch=_m/V; for (int i = 0; i<_n; ++i) { for (int j = 0; j<V; ++j) { for (int k = 0; k<sizePatch; ++k) { mean[j]+=_X[i*_m+sizePatch*j+k]; } } } mean.scal(T(1.0)/(_n*sizePatch)); for (int i = 0; i<_n; ++i) { for (int j = 0; j<V; ++j) { for (int k = 0; k<sizePatch; ++k) { _X[i*_m+sizePatch*j+k]-=mean[j]; } } } } }; template <typename T> inline void Matrix<T>::whiten(Vector<T>& mean, const Vector<T>& mask) { const int V = mean.n(); const int sizePatch=_m/V; mean.setZeros(); for (int i = 0; i<_n; ++i) { for (int j = 0; j<V; ++j) { for (int k = 0; k<sizePatch; ++k) { mean[j]+=_X[i*_m+sizePatch*j+k]; } } } for (int i = 0; i<V; ++i) mean[i] /= _n*cblas_asum(sizePatch,mask._X+i*sizePatch,1); for (int i = 0; 
i<_n; ++i) { for (int j = 0; j<V; ++j) { for (int k = 0; k<sizePatch; ++k) { if (mask[sizePatch*j+k]) _X[i*_m+sizePatch*j+k]-=mean[j]; } } } }; template <typename T> inline void Matrix<T>::unwhiten(Vector<T>& mean, const bool pattern) { if (pattern) { const int n =static_cast<int>(sqrt(static_cast<T>(_m))); for (int i = 0; i<_n; ++i) { int offsetx=0; for (int j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; int offsety=0; for (int k = 0; k<n; ++k) { offsety= (offsety+1) % 2; _X[i*_m+j*n+k]+=mean[2*offsetx+offsety]; } } } } else { const int V = mean.n(); const int sizePatch=_m/V; for (int i = 0; i<_n; ++i) { for (int j = 0; j<V; ++j) { for (int k = 0; k<sizePatch; ++k) { _X[i*_m+sizePatch*j+k]+=mean[j]; } } } } }; /// Transpose the current matrix and put the result in the matrix /// trans template <typename T> inline void Matrix<T>::transpose(Matrix<T>& trans) { trans.resize(_n,_m); T* out = trans._X; for (int i = 0; i<_n; ++i) for (int j = 0; j<_m; ++j) out[j*_n+i] = _X[i*_m+j]; }; /// A <- -A template <typename T> inline void Matrix<T>::neg() { for (int i = 0; i<_n*_m; ++i) _X[i]=-_X[i]; }; template <typename T> inline void Matrix<T>::incrDiag() { for (int i = 0; i<MIN(_n,_m); ++i) ++_X[i*_m+i]; }; template <typename T> inline void Matrix<T>::addDiag( const Vector<T>& diag) { T* d= diag.rawX(); for (int i = 0; i<MIN(_n,_m); ++i) _X[i*_m+i] += d[i]; }; template <typename T> inline void Matrix<T>::addDiag( const T diag) { for (int i = 0; i<MIN(_n,_m); ++i) _X[i*_m+i] += diag; }; template <typename T> inline void Matrix<T>::addToCols( const Vector<T>& cent) { Vector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); col.add(cent[i]); } }; template <typename T> inline void Matrix<T>::addVecToCols( const Vector<T>& vec, const T a) { Vector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); col.add(vec,a); } }; /// perform a rank one approximation uv' using the power method /// u0 is an initial guess for u (can be empty). 
template <typename T> inline void Matrix<T>::svdRankOne(const Vector<T>& u0, Vector<T>& u, Vector<T>& v) const { int i; const int max_iter=MAX(_m,MAX(_n,200)); const T eps=1e-10; u.resize(_m); v.resize(_n); T norm=u0.nrm2(); Vector<T> up(u0); if (norm < EPSILON) up.setAleat(); up.normalize(); multTrans(up,v); for (i = 0; i<max_iter; ++i) { mult(v,u); norm=u.nrm2(); u.scal(1.0/norm); multTrans(u,v); T theta=u.dot(up); if (i > 10 && (1 - fabs(theta)) < eps) break; up.copy(u); } }; template <typename T> inline void Matrix<T>::singularValues(Vector<T>& u) const { u.resize(MIN(_m,_n)); if (_m > 10*_n) { Matrix<T> XtX; this->XtX(XtX); syev<T>(no,lower,_n,XtX.rawX(),_n,u.rawX()); u.thrsPos(); u.Sqrt(); } else if (_n > 10*_m) { Matrix<T> XXt; this->XXt(XXt); syev<T>(no,lower,_m,XXt.rawX(),_m,u.rawX()); u.thrsPos(); u.Sqrt(); } else { T* vu, *vv; Matrix<T> copyX; copyX.copy(*this); gesvd<T>(no,no,_m,_n,copyX._X,_m,u.rawX(),vu,1,vv,1); } }; template <typename T> inline void Matrix<T>::svd(Matrix<T>& U, Vector<T>& S, Matrix<T>&V) const { const int num_eig=MIN(_m,_n); S.resize(num_eig); U.resize(_m,num_eig); V.resize(num_eig,_n); if (_m > 10*_n) { Matrix<T> Vt(_n,_n); this->XtX(Vt); syev<T>(allV,lower,_n,Vt.rawX(),_n,S.rawX()); S.thrsPos(); S.Sqrt(); this->mult(Vt,U); Vt.transpose(V); Vector<T> inveigs; inveigs.copy(S); for (int i = 0; i<num_eig; ++i) if (S[i] > 1e-10) { inveigs[i]=T(1.0)/S[i]; } else { inveigs[i]=T(1.0); } U.multDiagRight(inveigs); } else if (_n > 10*_m) { this->XXt(U); syev<T>(allV,lower,_m,U.rawX(),_m,S.rawX()); S.thrsPos(); S.Sqrt(); U.mult(*this,V,true,false); Vector<T> inveigs; inveigs.copy(S); for (int i = 0; i<num_eig; ++i) if (S[i] > 1e-10) { inveigs[i]=T(1.0)/S[i]; } else { inveigs[i]=T(1.0); } V.multDiagLeft(inveigs); } else { Matrix<T> copyX; copyX.copy(*this); gesvd<T>(reduced,reduced,_m,_n,copyX._X,_m,S.rawX(),U.rawX(),_m,V.rawX(),num_eig); } }; /// find the eigenvector corresponding to the largest eigenvalue /// when the current matrix is 
symmetric. u0 is the initial guess. /// using two iterations of the power method template <typename T> inline void Matrix<T>::eigLargestSymApprox( const Vector<T>& u0, Vector<T>& u) const { int i,j; const int max_iter=100; const T eps=10e-6; u.copy(u0); T norm = u.nrm2(); T theta; u.scal(1.0/norm); Vector<T> up(u); Vector<T> uor(u); T lambda=T(); for (j = 0; j<2;++j) { up.copy(u); for (i = 0; i<max_iter; ++i) { mult(up,u); norm = u.nrm2(); u.scal(1.0/norm); theta=u.dot(up); if ((1 - fabs(theta)) < eps) break; up.copy(u); } lambda+=theta*norm; if isnan(lambda) { std::cerr << "eigLargestSymApprox failed" << std::endl; exit(1); } if (j == 1 && lambda < eps) { u.copy(uor); break; } if (theta >= 0) break; u.copy(uor); for (i = 0; i<_m; ++i) _X[i*_m+i]-=lambda; } }; /// find the eigenvector corresponding to the eivenvalue with the /// largest magnitude when the current matrix is symmetric, /// using the power method. It /// returns the eigenvalue. u0 is an initial guess for the /// eigenvector. template <typename T> inline T Matrix<T>::eigLargestMagnSym( const Vector<T>& u0, Vector<T>& u) const { const int max_iter=1000; const T eps=10e-6; u.copy(u0); T norm = u.nrm2(); u.scal(1.0/norm); Vector<T> up(u); T lambda=T(); for (int i = 0; i<max_iter; ++i) { mult(u,up); u.copy(up); norm=u.nrm2(); if (norm > 0) u.scal(1.0/norm); if (norm == 0 || fabs(norm-lambda)/norm < eps) break; lambda=norm; } return norm; }; /// returns the value of the eigenvalue with the largest magnitude /// using the power iteration. 
template <typename T> inline T Matrix<T>::eigLargestMagnSym() const {
   const int max_iter=1000;
   const T eps=10e-6;
   // power iteration seeded with a random unit vector
   Vector<T> u(_m);
   u.setAleat();
   T norm = u.nrm2();
   u.scal(1.0/norm);
   Vector<T> up(u);
   T lambda=T();
   for (int i = 0; i<max_iter; ++i) {
      mult(u,up);
      u.copy(up);
      norm=u.nrm2();
      // stop once the eigenvalue-magnitude estimate stabilizes
      if (fabs(norm-lambda) < eps) break;
      lambda=norm;
      if (norm > 0) u.scal(1.0/norm); // guard against a zero iterate, consistent with the seeded overload
   }
   return norm;
};

/// inverse the matrix in place when it is symmetric:
/// Bunch-Kaufman factorization (sytrf) followed by sytri on the upper
/// triangle, then mirror it into the lower triangle
template <typename T> inline void Matrix<T>::invSym() {
   sytrf<T>(upper,_n,_X,_n);
   sytri<T>(upper,_n,_X,_n);
   this->fillSymmetric();
};

/// perform b = alpha*A'x + beta*b
template <typename T> inline void Matrix<T>::multTrans(const Vector<T>& x,
      Vector<T>& b, const T a, const T c) const {
   b.resize(_n);
   //  assert(x._n == _m && b._n == _n);
   cblas_gemv<T>(CblasColMajor,CblasTrans,_m,_n,a,_X,_m,x._X,1,c,b._X,1);
};

/// perform b = alpha*A'x + beta*b, when x is sparse
template <typename T> inline void Matrix<T>::multTrans(const SpVector<T>& x,
      Vector<T>& b, const T alpha, const T beta) const {
   b.resize(_n);
   Vector<T> col;
   // NOTE(fix): the two branches were swapped — a non-zero beta was
   // dropped (b was overwritten) while the beta==0 path read stale b.
   // Scale the old value by beta only when beta is non-zero.
   if (beta) {
      for (int i = 0; i<_n; ++i) {
         refCol(i,col);
         b._X[i] = beta*b._X[i] + alpha*col.dot(x);
      }
   } else {
      for (int i = 0; i<_n; ++i) {
         refCol(i,col);
         b._X[i] = alpha*col.dot(x);
      }
   }
};

/// b = A'x restricted to the active columns; inactive entries stay zero
template <typename T> inline void Matrix<T>::multTrans(
      const Vector<T>& x, Vector<T>& b, const Vector<bool>& active) const {
   b.setZeros();
   Vector<T> col;
   bool* pr_active=active.rawX();
   for (int i = 0; i<_n; ++i) {
      if (pr_active[i]) {
         this->refCol(i,col);
         b._X[i]=col.dot(x);
      }
   }
};

/// perform b = alpha*A*x+beta*b
template <typename T> inline void Matrix<T>::mult(const Vector<T>& x, Vector<T>& b, const T a, const
T c) const { // assert(x._n == _n && b._n == _m); b.resize(_m); cblas_gemv<T>(CblasColMajor,CblasNoTrans,_m,_n,a,_X,_m,x._X,1,c,b._X,1); }; /// perform b = alpha*A*x + beta*b, when x is sparse template <typename T> inline void Matrix<T>::mult(const SpVector<T>& x, Vector<T>& b, const T a, const T a2) const { if (!a2) { b.setZeros(); } else if (a2 != 1.0) { b.scal(a2); } if (a == 1.0) { for (int i = 0; i<x._L; ++i) { cblas_axpy<T>(_m,x._v[i],_X+x._r[i]*_m,1,b._X,1); } } else { for (int i = 0; i<x._L; ++i) { cblas_axpy<T>(_m,a*x._v[i],_X+x._r[i]*_m,1,b._X,1); } } }; /// perform C = a*A*B + b*C, possibly transposing A or B. template <typename T> inline void Matrix<T>::mult(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { CBLAS_TRANSPOSE trA,trB; int m,k,n; if (transA) { trA = CblasTrans; m = _n; k = _m; } else { trA= CblasNoTrans; m = _m; k = _n; } if (transB) { trB = CblasTrans; n = B._m; // assert(B._n == k); } else { trB = CblasNoTrans; n = B._n; // assert(B._m == k); } C.resize(m,n); cblas_gemm<T>(CblasColMajor,trA,trB,m,n,k,a,_X,_m,B._X,B._m, b,C._X,C._m); }; /// perform C = a*B*A + b*C, possibly transposing A or B. 
template <typename T> inline void Matrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { B.mult(*this,C,transB,transA,a,b); }; /// perform C = A*B, when B is sparse template <typename T> inline void Matrix<T>::mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { if (transA) { if (transB) { C.resize(_n,B.m()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> rowC(B.m()); Vector<T> colA; for (int i = 0; i<_n; ++i) { this->refCol(i,colA); B.mult(colA,rowC,a); C.addRow(i,rowC,a); } } else { C.resize(_n,B.n()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> colC; SpVector<T> colB; for (int i = 0; i<B.n(); ++i) { C.refCol(i,colC); B.refCol(i,colB); this->multTrans(colB,colC,a,T(1.0)); } } } else { if (transB) { C.resize(_m,B.m()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> colA; SpVector<T> colB; for (int i = 0; i<_n; ++i) { this->refCol(i,colA); B.refCol(i,colB); C.rank1Update(colA,colB,a); } } else { C.resize(_m,B.n()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> colC; SpVector<T> colB; for (int i = 0; i<B.n(); ++i) { C.refCol(i,colC); B.refCol(i,colB); this->mult(colB,colC,a,T(1.0)); } } }; } /// mult by a diagonal matrix on the left template <typename T> inline void Matrix<T>::multDiagLeft(const Vector<T>& diag) { if (diag.n() != _m) return; T* d = diag.rawX(); for (int i = 0; i< _n; ++i) { for (int j = 0; j<_m; ++j) { _X[i*_m+j] *= d[j]; } } }; /// mult by a diagonal matrix on the right template <typename T> inline void Matrix<T>::multDiagRight( const Vector<T>& diag) { if (diag.n() != _n) return; T* d = diag.rawX(); for (int i = 0; i< _n; ++i) { for (int j = 0; j<_m; ++j) { _X[i*_m+j] *= d[i]; } } }; /// C = A .* B, elementwise multiplication template <typename T> inline void Matrix<T>::mult_elementWise( const Matrix<T>& B, Matrix<T>& C) const { assert(_n == B._n && _m == B._m); C.resize(_m,_n); 
vMul<T>(_n*_m,_X,B._X,C._X); }; /// C = A .* B, elementwise multiplication template <typename T> inline void Matrix<T>::div_elementWise( const Matrix<T>& B, Matrix<T>& C) const { assert(_n == B._n && _m == B._m); C.resize(_m,_n); vDiv<T>(_n*_m,_X,B._X,C._X); }; /// XtX = A'*A template <typename T> inline void Matrix<T>::XtX(Matrix<T>& xtx) const { xtx.resize(_n,_n); cblas_syrk<T>(CblasColMajor,CblasUpper,CblasTrans,_n,_m,T(1.0), _X,_m,T(),xtx._X,_n); xtx.fillSymmetric(); }; /// XXt = A*At template <typename T> inline void Matrix<T>::XXt(Matrix<T>& xxt) const { xxt.resize(_m,_m); cblas_syrk<T>(CblasColMajor,CblasUpper,CblasNoTrans,_m,_n,T(1.0), _X,_m,T(),xxt._X,_m); xxt.fillSymmetric(); }; /// XXt = A*A' where A is an upper triangular matrix template <typename T> inline void Matrix<T>::upperTriXXt(Matrix<T>& XXt, const int L) const { XXt.resize(L,L); for (int i = 0; i<L; ++i) { cblas_syr<T>(CblasColMajor,CblasUpper,i+1,T(1.0),_X+i*_m,1,XXt._X,L); } XXt.fillSymmetric(); } /// extract the diagonal template <typename T> inline void Matrix<T>::diag(Vector<T>& dv) const { int size_diag=MIN(_n,_m); dv.resize(size_diag); T* const d = dv.rawX(); for (int i = 0; i<size_diag; ++i) d[i]=_X[i*_m+i]; }; /// set the diagonal template <typename T> inline void Matrix<T>::setDiag(const Vector<T>& dv) { int size_diag=MIN(_n,_m); T* const d = dv.rawX(); for (int i = 0; i<size_diag; ++i) _X[i*_m+i]=d[i]; }; /// set the diagonal template <typename T> inline void Matrix<T>::setDiag(const T val) { int size_diag=MIN(_n,_m); for (int i = 0; i<size_diag; ++i) _X[i*_m+i]=val; }; /// each element of the matrix is replaced by its exponential template <typename T> inline void Matrix<T>::exp() { vExp<T>(_n*_m,_X,_X); }; template <typename T> inline void Matrix<T>::Sqrt() { vSqrt<T>(_n*_m,_X,_X); }; template <typename T> inline void Matrix<T>::Invsqrt() { vInvSqrt<T>(_n*_m,_X,_X); }; /// return vec1'*A*vec2, where vec2 is sparse template <typename T> inline T Matrix<T>::quad( const SpVector<T>& 
vec) const { T sum = T(); int L = vec._L; int* r = vec._r; T* v = vec._v; for (int i = 0; i<L; ++i) for (int j = 0; j<L; ++j) sum += _X[r[i]*_m+r[j]]*v[i]*v[j]; return sum; }; template <typename T> inline void Matrix<T>::quad_mult(const Vector<T>& vec1, const SpVector<T>& vec2, Vector<T>& y, const T a, const T b) const { const int size_y= y.n(); const int nn = _n/size_y; //y.resize(size_y); //y.setZeros(); Matrix<T> tmp; for (int i = 0; i<size_y; ++i) { tmp.setData(_X+(i*nn)*_m,_m,nn); y[i]=b*y[i]+a*tmp.quad(vec1,vec2); } } /// return vec'*A*vec when vec is sparse template <typename T> inline T Matrix<T>::quad( const Vector<T>& vec1, const SpVector<T>& vec) const { T sum = T(); int L = vec._L; int* r = vec._r; T* v = vec._v; Vector<T> col; for (int i = 0; i<L; ++i) { this->refCol(r[i],col); sum += v[i]*col.dot(vec1); } return sum; }; /// add alpha*mat to the current matrix template <typename T> inline void Matrix<T>::add(const Matrix<T>& mat, const T alpha) { assert(mat._m == _m && mat._n == _n); cblas_axpy<T>(_n*_m,alpha,mat._X,1,_X,1); }; /// add alpha*mat to the current matrix template <typename T> inline T Matrix<T>::dot(const Matrix<T>& mat) const { assert(mat._m == _m && mat._n == _n); return cblas_dot<T>(_n*_m,mat._X,1,_X,1); }; /// add alpha to the current matrix template <typename T> inline void Matrix<T>::add(const T alpha) { for (int i = 0; i<_n*_m; ++i) _X[i]+=alpha; }; /// substract the matrix mat to the current matrix template <typename T> inline void Matrix<T>::sub(const Matrix<T>& mat) { vSub<T>(_n*_m,_X,mat._X,_X); }; /// compute the sum of the magnitude of the matrix values template <typename T> inline T Matrix<T>::asum() const { return cblas_asum<T>(_n*_m,_X,1); }; /// returns the trace of the matrix template <typename T> inline T Matrix<T>::trace() const { T sum=T(); int m = MIN(_n,_m); for (int i = 0; i<m; ++i) sum += _X[i*_m+i]; return sum; }; /// return ||A||_F template <typename T> inline T Matrix<T>::normF() const { return 
cblas_nrm2<T>(_n*_m,_X,1); }; template <typename T> inline T Matrix<T>::mean() const { Vector<T> vec; this->toVect(vec); return vec.mean(); }; /// return ||A||_F^2 template <typename T> inline T Matrix<T>::normFsq() const { return cblas_dot<T>(_n*_m,_X,1,_X,1); }; /// return ||At||_{inf,2} template <typename T> inline T Matrix<T>::norm_inf_2_col() const { Vector<T> col; T max = -1.0; for (int i = 0; i<_n; ++i) { refCol(i,col); T norm_col = col.nrm2(); if (norm_col > max) max = norm_col; } return max; }; /// return ||At||_{1,2} template <typename T> inline T Matrix<T>::norm_1_2_col() const { Vector<T> col; T sum = 0.0; for (int i = 0; i<_n; ++i) { refCol(i,col); sum += col.nrm2(); } return sum; }; /// returns the l2 norms of the columns template <typename T> inline void Matrix<T>::norm_2_rows( Vector<T>& norms) const { norms.resize(_m); norms.setZeros(); for (int i = 0; i<_n; ++i) for (int j = 0; j<_m; ++j) norms[j] += _X[i*_m+j]*_X[i*_m+j]; for (int j = 0; j<_m; ++j) norms[j]=sqrt(norms[j]); }; /// returns the l2 norms of the columns template <typename T> inline void Matrix<T>::norm_2sq_rows( Vector<T>& norms) const { norms.resize(_m); norms.setZeros(); for (int i = 0; i<_n; ++i) for (int j = 0; j<_m; ++j) norms[j] += _X[i*_m+j]*_X[i*_m+j]; }; /// returns the l2 norms of the columns template <typename T> inline void Matrix<T>::norm_2_cols( Vector<T>& norms) const { norms.resize(_n); Vector<T> col; for (int i = 0; i<_n; ++i) { refCol(i,col); norms[i] = col.nrm2(); } }; /// returns the linf norms of the columns template <typename T> inline void Matrix<T>::norm_inf_cols(Vector<T>& norms) const { norms.resize(_n); Vector<T> col; for (int i = 0; i<_n; ++i) { refCol(i,col); norms[i] = col.fmaxval(); } }; /// returns the linf norms of the columns template <typename T> inline void Matrix<T>::norm_inf_rows(Vector<T>& norms) const { norms.resize(_m); norms.setZeros(); for (int i = 0; i<_n; ++i) for (int j = 0; j<_m; ++j) norms[j] = MAX(abs<T>(_X[i*_m+j]),norms[j]); }; /// 
returns the linf norms of the columns template <typename T> inline void Matrix<T>::norm_l1_rows(Vector<T>& norms) const { norms.resize(_m); norms.setZeros(); for (int i = 0; i<_n; ++i) for (int j = 0; j<_m; ++j) norms[j] += abs<T>(_X[i*_m+j]); }; /// returns the l2 norms of the columns template <typename T> inline void Matrix<T>::norm_2sq_cols( Vector<T>& norms) const { norms.resize(_n); Vector<T> col; for (int i = 0; i<_n; ++i) { refCol(i,col); norms[i] = col.nrm2sq(); } }; template <typename T> inline void Matrix<T>::sum_cols(Vector<T>& sum) const { sum.resize(_m); sum.setZeros(); Vector<T> tmp; for (int i = 0; i<_n; ++i) { this->refCol(i,tmp); sum.add(tmp); } }; /// Compute the mean of the columns template <typename T> inline void Matrix<T>::meanCol(Vector<T>& mean) const { Vector<T> ones(_n); ones.set(T(1.0/_n)); this->mult(ones,mean,1.0,0.0); }; /// Compute the mean of the rows template <typename T> inline void Matrix<T>::meanRow(Vector<T>& mean) const { Vector<T> ones(_m); ones.set(T(1.0/_m)); this->multTrans(ones,mean,1.0,0.0); }; /// fill the matrix with the row given template <typename T> inline void Matrix<T>::fillRow(const Vector<T>& row) { for (int i = 0; i<_n; ++i) { T val = row[i]; for (int j = 0; j<_m; ++j) { _X[i*_m+j]=val; } } }; /// fill the matrix with the row given template <typename T> inline void Matrix<T>::extractRow(const int j, Vector<T>& row) const { row.resize(_n); for (int i = 0; i<_n; ++i) { row[i]=_X[i*_m+j]; } }; /// fill the matrix with the row given template <typename T> inline void Matrix<T>::setRow(const int j, const Vector<T>& row) { for (int i = 0; i<_n; ++i) { _X[i*_m+j]=row[i]; } }; /// fill the matrix with the row given template <typename T> inline void Matrix<T>::addRow(const int j, const Vector<T>& row, const T a) { if (a==1.0) { for (int i = 0; i<_n; ++i) { _X[i*_m+j]+=row[i]; } } else { for (int i = 0; i<_n; ++i) { _X[i*_m+j]+=a*row[i]; } } }; /// perform soft-thresholding of the matrix, with the threshold nu template 
<typename T> inline void Matrix<T>::softThrshold(const T nu) { Vector<T> vec; toVect(vec); vec.softThrshold(nu); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::hardThrshold(const T nu) { Vector<T> vec; toVect(vec); vec.hardThrshold(nu); }; /// perform thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsmax(const T nu) { Vector<T> vec; toVect(vec); vec.thrsmax(nu); }; /// perform thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsmin(const T nu) { Vector<T> vec; toVect(vec); vec.thrsmin(nu); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::inv_elem() { Vector<T> vec; toVect(vec); vec.inv(); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::blockThrshold(const T nu, const int sizeGroup) { for (int i = 0; i<_n; ++i) { int j; for (j = 0; j<_m-sizeGroup+1; j+=sizeGroup) { T nrm=0; for (int k = 0; k<sizeGroup; ++k) nrm += _X[i*_m +j+k]*_X[i*_m +j+k]; nrm=sqrt(nrm); if (nrm < nu) { for (int k = 0; k<sizeGroup; ++k) _X[i*_m +j+k]=0; } else { T scal = (nrm-nu)/nrm; for (int k = 0; k<sizeGroup; ++k) _X[i*_m +j+k]*=scal; } } j -= sizeGroup; for ( ; j<_m; ++j) _X[j]=softThrs<T>(_X[j],nu); } } template <typename T> inline void Matrix<T>::sparseProject(Matrix<T>& Y, const T thrs, const int mode, const T lambda1, const T lambda2, const T lambda3, const bool pos, const int numThreads) { int NUM_THREADS=init_omp(numThreads); Vector<T>* XXT= new Vector<T>[NUM_THREADS]; for (int i = 0; i<NUM_THREADS; ++i) { XXT[i].resize(_m); } int i; #pragma omp parallel for private(i) for (i = 0; i< _n; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif Vector<T> Xi; this->refCol(i,Xi); Vector<T> Yi; Y.refCol(i,Yi); Vector<T>& XX = XXT[numT]; XX.copy(Xi); 
XX.sparseProject(Yi,thrs,mode,lambda1,lambda2,lambda3,pos); } delete[](XXT); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsPos() { Vector<T> vec; toVect(vec); vec.thrsPos(); }; /// perform A <- A + alpha*vec1*vec2' template <typename T> inline void Matrix<T>::rank1Update( const Vector<T>& vec1, const Vector<T>& vec2, const T alpha) { cblas_ger<T>(CblasColMajor,_m,_n,alpha,vec1._X,1,vec2._X,1,_X,_m); }; /// perform A <- A + alpha*vec1*vec2', when vec1 is sparse template <typename T> inline void Matrix<T>::rank1Update( const SpVector<T>& vec1, const Vector<T>& vec2, const T alpha) { int* r = vec1._r; T* v = vec1._v; T* X2 = vec2._X; assert(vec2._n == _n); if (alpha == 1.0) { for (int i = 0; i<_n; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[i*_m+r[j]] += v[j]*X2[i]; } } } else { for (int i = 0; i<_n; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[i*_m+r[j]] += alpha*v[j]*X2[i]; } } } }; template <typename T> inline void Matrix<T>::rank1Update_mult(const Vector<T>& vec1, const Vector<T>& vec1b, const SpVector<T>& vec2, const T alpha) { const int nn = vec1b.n(); const int size_A = _n/nn; Matrix<T> tmp; for (int i = 0; i<nn; ++i) { tmp.setData(_X+i*size_A*_m,_m,size_A); tmp.rank1Update(vec1,vec2,alpha*vec1b[i]); } }; /// perform A <- A + alpha*vec1*vec2', when vec1 is sparse template <typename T> inline void Matrix<T>::rank1Update( const SpVector<T>& vec1, const SpVector<T>& vec2, const T alpha) { int* r = vec1._r; T* v = vec1._v; T* v2 = vec2._v; int* r2 = vec2._r; if (alpha == 1.0) { for (int i = 0; i<vec2._L; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[r2[i]*_m+r[j]] += v[j]*v2[i]; } } } else { for (int i = 0; i<vec2._L; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[r[i]*_m+r[j]] += alpha*v[j]*v2[i]; } } } }; /// perform A <- A + alpha*vec1*vec2', when vec2 is sparse template <typename T> inline void Matrix<T>::rank1Update( const Vector<T>& vec1, const SpVector<T>& vec2, const T alpha) { int* r = 
vec2._r; T* v = vec2._v; Vector<T> Xi; for (int i = 0; i<vec2._L; ++i) { this->refCol(r[i],Xi); Xi.add(vec1,v[i]*alpha); } }; /// perform A <- A + alpha*vec1*vec1', when vec1 is sparse template <typename T> inline void Matrix<T>::rank1Update( const SpVector<T>& vec1, const T alpha) { int* r = vec1._r; T* v = vec1._v; if (alpha == 1.0) { for (int i = 0; i<vec1._L; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[r[i]*_m+r[j]] += v[j]*v[i]; } } } else { for (int i = 0; i<vec1._L; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[_m*r[i]+r[j]] += alpha*v[j]*v[i]; } } } }; /// compute x, such that b = Ax, template <typename T> inline void Matrix<T>::conjugateGradient( const Vector<T>& b, Vector<T>& x, const T tol, const int itermax) const { Vector<T> R,P,AP; R.copy(b); this->mult(x,R,T(-1.0),T(1.0)); P.copy(R); int k = 0; T normR = R.nrm2sq(); T alpha; while (normR > tol && k < itermax) { this->mult(P,AP); alpha = normR/P.dot(AP); x.add(P,alpha); R.add(AP,-alpha); T tmp = R.nrm2sq(); P.scal(tmp/normR); normR = tmp; P.add(R,T(1.0)); ++k; }; }; template <typename T> inline void Matrix<T>::drop(char* fileName) const { std::ofstream f; f.precision(12); f.flags(std::ios_base::scientific); f.open(fileName, ofstream::trunc); std::cout << "Matrix written in " << fileName << std::endl; for (int i = 0; i<_n; ++i) { for (int j = 0; j<_m; ++j) f << _X[i*_m+j] << " "; f << std::endl; } f.close(); }; /// compute a Nadaraya Watson estimator template <typename T> inline void Matrix<T>::NadarayaWatson( const Vector<int>& ind, const T sigma) { if (ind.n() != _n) return; init_omp(MAX_THREADS); const int Ngroups=ind.maxval(); int i; #pragma omp parallel for private(i) for (i = 1; i<=Ngroups; ++i) { Vector<int> indicesGroup(_n); int count = 0; for (int j = 0; j<_n; ++j) if (ind[j] == i) indicesGroup[count++]=j; Matrix<T> Xm(_m,count); Vector<T> col, col2; for (int j= 0; j<count; ++j) { this->refCol(indicesGroup[j],col); Xm.refCol(j,col2); col2.copy(col); } Vector<T> norms; Xm.norm_2sq_cols(norms); 
Matrix<T> weights; Xm.XtX(weights); weights.scal(T(-2.0)); Vector<T> ones(Xm.n()); ones.set(T(1.0)); weights.rank1Update(ones,norms); weights.rank1Update(norms,ones); weights.scal(-sigma); weights.exp(); Vector<T> den; weights.mult(ones,den); den.inv(); weights.multDiagRight(den); Matrix<T> num; Xm.mult(weights,num); for (int j= 0; j<count; ++j) { this->refCol(indicesGroup[j],col); num.refCol(j,col2); col.copy(col2); } } }; /// make a sparse copy of the current matrix template <typename T> inline void Matrix<T>::toSparse(SpMatrix<T>& out) const { out.clear(); int count=0; int* pB; #pragma omp critical { pB=new int[_n+1]; } int* pE=pB+1; for (int i = 0; i<_n*_m; ++i) if (_X[i] != 0) ++count; int* r; T* v; #pragma omp critical { r=new int[count]; v=new T[count]; } count=0; for (int i = 0; i<_n; ++i) { pB[i]=count; for (int j = 0; j<_m; ++j) { if (_X[i*_m+j] != 0) { v[count]=_X[i*_m+j]; r[count++]=j; } } pE[i]=count; } out._v=v; out._r=r; out._pB=pB; out._pE=pE; out._m=_m; out._n=_n; out._nzmax=count; out._externAlloc=false; }; /// make a sparse copy of the current matrix template <typename T> inline void Matrix<T>::toSparseTrans( SpMatrix<T>& out) { out.clear(); int count=0; int* pB; #pragma omp critical { pB=new int[_m+1]; } int* pE=pB+1; for (int i = 0; i<_n*_m; ++i) if (_X[i] != 0) ++count; int* r; T* v; #pragma omp critical { r=new int[count]; v=new T[count]; } count=0; for (int i = 0; i<_m; ++i) { pB[i]=count; for (int j = 0; j<_n; ++j) { if (_X[i+j*_m] != 0) { v[count]=_X[j*_m+i]; r[count++]=j; } } pE[i]=count; } out._v=v; out._r=r; out._pB=pB; out._pE=pE; out._m=_n; out._n=_m; out._nzmax=count; out._externAlloc=false; }; /// make a reference of the matrix to a vector vec template <typename T> inline void Matrix<T>::toVect( Vector<T>& vec) const { vec.clear(); vec._externAlloc=true; vec._n=_n*_m; vec._X=_X; }; /// merge two dictionaries template <typename T> inline void Matrix<T>::merge(const Matrix<T>& B, Matrix<T>& C) const { const int K =_n; Matrix<T> G; 
/* NOTE(review): tail of a Matrix<T> member whose head lies before this chunk.
 * After G = this' * B (computed just below), it appends to C those columns of
 * B whose largest-magnitude entry in the corresponding column of G is below
 * 0.995, after the K columns of *this.  Code left byte-identical. */
this->mult(B,G,true,false);
std::list<int> list;
for (int i = 0; i<G.n(); ++i) {
   Vector<T> g;
   G.refCol(i,g);
   T fmax=g.fmaxval();
   if (fmax < 0.995) list.push_back(i);
}
/// C <- [ columns of *this | selected columns of B ]
C.resize(_m,K+list.size());
for (int i = 0; i<K; ++i) {
   Vector<T> d, d2;
   C.refCol(i,d);
   this->refCol(i,d2);
   d.copy(d2);
}
int count=0;
for (std::list<int>::const_iterator it = list.begin(); it != list.end(); ++it) {
   Vector<T> d, d2;
   C.refCol(K+count,d);
   B.refCol(*it,d2);
   d.copy(d2);
   ++count;
}
};

/* ***********************************
 * Implementation of the class Vector
 * ***********************************/

/// Empty constructor
template <typename T> Vector<T>::Vector() :
   _externAlloc(true), _X(NULL), _n(0) { };

/// Constructor. Create a new vector of size n
/// NOTE(review): 'new' is wrapped in an omp critical section throughout this
/// file -- presumably to serialize heap allocation across threads; confirm why.
template <typename T> Vector<T>::Vector(int n) :
   _externAlloc(false), _n(n) {
#pragma omp critical
   {
      _X=new T[_n];
   }
};

/// Constructor with existing data; the vector does NOT take ownership of X
template <typename T> Vector<T>::Vector(T* X, int n) :
   _externAlloc(true), _X(X), _n(n) { };

/// Copy constructor (deep copy via BLAS copy)
template <typename T> Vector<T>::Vector(const Vector<T>& vec) :
   _externAlloc(false), _n(vec._n) {
#pragma omp critical
   {
      _X=new T[_n];
   }
   cblas_copy<T>(_n,vec._X,1,_X,1);
};

/// Destructor
template <typename T> Vector<T>::~Vector() {
   clear();
};

/// Print the vector to std::cout
template <> inline void Vector<double>::print(const char* name) const {
   printf("%s, %d\n",name,_n);
   for (int i = 0; i<_n; ++i) {
      printf("%g ",_X[i]);
   }
   printf("\n");
};

/// Print the vector to std::cout
template <> inline void Vector<float>::print(const char* name) const {
   printf("%s, %d\n",name,_n);
   for (int i = 0; i<_n; ++i) {
      printf("%g ",_X[i]);
   }
   printf("\n");
};

/// Print the vector to std::cout
template <> inline void Vector<int>::print(const char* name) const {
   printf("%s, %d\n",name,_n);
   for (int i = 0; i<_n; ++i) {
      printf("%d ",_X[i]);
   }
   printf("\n");
};

/// Print the vector to std::cout
template <> inline void Vector<bool>::print(const char* name) const {
   printf("%s, %d\n",name,_n);
   for (int i = 0; i<_n; ++i) {
      printf("%d ",_X[i] ? 1 : 0);
   }
   printf("\n");
};

/// returns the index of the largest value
template <typename T> inline int Vector<T>::max() const {
   int imax=0;
   T max=_X[0];
   for (int j = 1; j<_n; ++j) {
      T cur = _X[j];
      if (cur > max) {
         imax=j;
         max = cur;
      }
   }
   return imax;
};

/// returns the index of the minimum value
template <typename T> inline int Vector<T>::min() const {
   int imin=0;
   T min=_X[0];
   for (int j = 1; j<_n; ++j) {
      T cur = _X[j];
      if (cur < min) {
         imin=j;
         min = cur;
      }
   }
   return imin;
};

/// returns the maximum value
template <typename T> inline T Vector<T>::maxval() const {
   return _X[this->max()];
};

/// returns the minimum value
template <typename T> inline T Vector<T>::minval() const {
   return _X[this->min()];
};

/// returns the maximum magnitude
template <typename T> inline T Vector<T>::fmaxval() const {
   return fabs(_X[this->fmax()]);
};

/// returns the minimum magnitude
template <typename T> inline T Vector<T>::fminval() const {
   return fabs(_X[this->fmin()]);
};

/// generate n logarithmically spaced values between a and b
/// NOTE(review): assumes n >= 2 (divides by n-1) and a,b > 0 -- callers must
/// guarantee this.
template <typename T> inline void Vector<T>::logspace(const int n, const T a, const T b) {
   T first=log10(a);
   T last=log10(b);
   T step = (last-first)/(n-1);
   this->resize(n);
   _X[0]=first;
   for (int i = 1; i<_n; ++i) _X[i]=_X[i-1]+step;
   for (int i = 0; i<_n; ++i) _X[i]=pow(T(10.0),_X[i]);
}

/// number of non-zero entries
template <typename T> inline int Vector<T>::nnz() const {
   int sum=0;
   for (int i = 0; i<_n; ++i)
      if (_X[i] != T()) ++sum;
   return sum;
};

/// generate logarithmically spaced values (integer version: values are
/// rounded down and forced to be strictly increasing)
template <> inline void Vector<int>::logspace(const int n, const int a, const int b) {
   Vector<double> tmp(n);
   tmp.logspace(n,double(a),double(b));
   this->resize(n);
   _X[0]=a;
   _X[n-1]=b;
   for (int i = 1; i<_n-1; ++i) {
      int candidate=static_cast<int>(floor(static_cast<double>(tmp[i])));
      _X[i]= candidate > _X[i-1] ? candidate : _X[i-1]+1;
   }
}

/// returns the index of the value with largest magnitude
template <typename T> inline int Vector<T>::fmax() const {
   return cblas_iamax<T>(_n,_X,1);
};

/// returns the index of the value with smallest magnitude
template <typename T> inline int Vector<T>::fmin() const {
   return cblas_iamin<T>(_n,_X,1);
};

/// returns a reference to X[index]
template <typename T> inline T& Vector<T>::operator[] (const int i) {
   assert(i>=0 && i<_n);
   return _X[i];
};

/// returns X[index]
template <typename T> inline T Vector<T>::operator[] (const int i) const {
   assert(i>=0 && i<_n);
   return _X[i];
};

/// make a copy of x
template <typename T> inline void Vector<T>::copy(const Vector<T>& x) {
   this->resize(x.n());
   //cblas_copy<T>(_n,x._X,1,_X,1);
   memcpy(_X,x._X,_n*sizeof(T));
};

/// Set all values to zero
template <typename T> inline void Vector<T>::setZeros() {
   memset(_X,0,_n*sizeof(T));
};

/// resize the vector; contents are zeroed unless the size is unchanged
template <typename T> inline void Vector<T>::resize(const int n) {
   if (_n == n) return;
   clear();
#pragma omp critical
   {
      _X=new T[n];
   }
   _n=n;
   _externAlloc=false;
   this->setZeros();
};

/// change the data of the vector (no ownership taken)
template <typename T> inline void Vector<T>::setPointer(T* X, const int n) {
   clear();
   _externAlloc=true;
   _X=X;
   _n=n;
};

/// put a random permutation of size n (for integral vectors)
/// NOTE(review): uses random() without seeding here; seeding is the caller's
/// responsibility.
template <> inline void Vector<int>::randperm(int n) {
   resize(n);
   Vector<int> table(n);
   for (int i = 0; i<n; ++i) table[i]=i;
   int size=n;
   for (int i = 0; i<n; ++i) {
      const int ind=random() % size;
      _X[i]=table[ind];
      table[ind]=table[size-1];
      --size;
   }
};

/// put random values in the vector (white Gaussian Noise)
template <typename T> inline void Vector<T>::setAleat() {
   for (int i = 0; i<_n; ++i) _X[i]=normalDistrib<T>();
};

/// clear the vector (frees the data only if this vector owns it);
/// safe to call repeatedly
template <typename T> inline void Vector<T>::clear() {
   if (!_externAlloc) delete[](_X);
   _n=0;
   _X=NULL;
   _externAlloc=true;
};

/// performs soft-thresholding of the vector
template <typename T> inline void
Vector<T>::softThrshold(const T nu) {
   for (int i = 0; i<_n; ++i) {
      if (_X[i] > nu) {
         _X[i] -= nu;
      } else if (_X[i] < -nu) {
         _X[i] += nu;
      } else {
         _X[i] = T();
      }
   }
};

/// performs hard-thresholding of the vector (zeroes entries within [-nu,nu])
template <typename T> inline void Vector<T>::hardThrshold(const T nu) {
   for (int i = 0; i<_n; ++i) {
      if (!(_X[i] > nu || _X[i] < -nu)) {
         _X[i] = 0;
      }
   }
};

/// elementwise lower clamp: X[i] <- max(X[i],nu)
template <typename T> inline void Vector<T>::thrsmax(const T nu) {
   for (int i = 0; i<_n; ++i) _X[i]=MIN(_X[i],nu);
}

/// elementwise upper clamp: X[i] <- min(X[i],nu)
template <typename T> inline void Vector<T>::thrsmin(const T nu) {
   for (int i = 0; i<_n; ++i) _X[i]=MIN(_X[i],nu);
}

/// clamp each entry to the interval [-nu,nu]
template <typename T> inline void Vector<T>::thrsabsmin(const T nu) {
   for (int i = 0; i<_n; ++i) _X[i]=MAX(MIN(_X[i],nu),-nu);
}

/// zero out entries whose magnitude is below nu
template <typename T> inline void Vector<T>::thrshold(const T nu) {
   for (int i = 0; i<_n; ++i)
      if (abs<T>(_X[i]) < nu) _X[i]=0;
}

/// projection onto the positive orthant (negative entries are zeroed)
template <typename T> inline void Vector<T>::thrsPos() {
   for (int i = 0; i<_n; ++i) {
      if (_X[i] < 0) _X[i]=0;
   }
};

/// true iff every entry is true
template <> inline bool Vector<bool>::alltrue() const {
   for (int i = 0; i<_n; ++i) {
      if (!_X[i]) return false;
   }
   return true;
};

/// true iff every entry is false
template <> inline bool Vector<bool>::allfalse() const {
   for (int i = 0; i<_n; ++i) {
      if (_X[i]) return false;
   }
   return true;
};

/// set each value of the vector to val
template <typename T> inline void Vector<T>::set(const T val) {
   for (int i = 0; i<_n; ++i) _X[i]=val;
};

/// returns ||A||_2
template <typename T> inline T Vector<T>::nrm2() const {
   return cblas_nrm2<T>(_n,_X,1);
};

/// returns ||A||_2^2
template <typename T> inline T Vector<T>::nrm2sq() const {
   return cblas_dot<T>(_n,_X,1,_X,1);
};

/// returns A'x
template <typename T> inline T Vector<T>::dot(const Vector<T>& x) const {
   assert(_n == x._n);
   return cblas_dot<T>(_n,_X,1,x._X,1);
};

/// returns A'x, when x is sparse
template <typename T> inline T Vector<T>::dot(const SpVector<T>& x) const {
   T sum=0;
   const T* v = x._v;
   const int* r = x._r;
   for (int i = 0; i<x._L; ++i) {
      sum += _X[r[i]]*v[i];
   }
   return sum;
};

/// A <- A + a*x
template <typename T> inline void Vector<T>::add(const Vector<T>& x, const T a) {
   assert(_n == x._n);
   cblas_axpy<T>(_n,a,x._X,1,_X,1);
};

/// A <- A + a*x, x sparse
template <typename T> inline void Vector<T>::add(const SpVector<T>& x, const T a) {
   if (a == 1.0) {
      for (int i = 0; i<x._L; ++i) _X[x._r[i]]+=x._v[i];
   } else {
      for (int i = 0; i<x._L; ++i) _X[x._r[i]]+=a*x._v[i];
   }
};

/// adds a to each value in the vector
template <typename T> inline void Vector<T>::add(const T a) {
   for (int i = 0; i<_n; ++i) _X[i]+=a;
};

/// A <- A - x
template <typename T> inline void Vector<T>::sub(const Vector<T>& x) {
   assert(_n == x._n);
   vSub<T>(_n,_X,x._X,_X);
};

/// A <- A - x, x sparse
template <typename T> inline void Vector<T>::sub(const SpVector<T>& x) {
   for (int i = 0; i<x._L; ++i) _X[x._r[i]]-=x._v[i];
};

/// A <- A ./ x
template <typename T> inline void Vector<T>::div(const Vector<T>& x) {
   assert(_n == x._n);
   vDiv<T>(_n,_X,x._X,_X);
};

/// A <- x ./ y
template <typename T> inline void Vector<T>::div(const Vector<T>& x, const Vector<T>& y) {
   assert(_n == x._n);
   vDiv<T>(_n,x._X,y._X,_X);
};

/// A <- x .^ 2
template <typename T> inline void Vector<T>::sqr(const Vector<T>& x) {
   this->resize(x._n);
   vSqr<T>(_n,x._X,_X);
}

/// A <- 1 ./ sqrt(x)
template <typename T> inline void Vector<T>::Invsqrt(const Vector<T>& x) {
   this->resize(x._n);
   vInvSqrt<T>(_n,x._X,_X);
}

/// A <- sqrt(x)
template <typename T> inline void Vector<T>::Sqrt(const Vector<T>& x) {
   this->resize(x._n);
   vSqrt<T>(_n,x._X,_X);
}

/// A <- 1 ./ sqrt(A)
template <typename T> inline void Vector<T>::Invsqrt() {
   vInvSqrt<T>(_n,_X,_X);
}

/// A <- sqrt(A)
template <typename T> inline void Vector<T>::Sqrt() {
   vSqrt<T>(_n,_X,_X);
}

/// A <- 1./x
template <typename T> inline void Vector<T>::inv(const Vector<T>& x) {
   this->resize(x.n());
   vInv<T>(_n,x._X,_X);
};

/// A <- 1./A
template <typename T> inline void Vector<T>::inv() {
   vInv<T>(_n,_X,_X);
};

/// A <- x .* y
template <typename T> inline void Vector<T>::mult(const Vector<T>& x, const Vector<T>& y) {
   this->resize(x.n());
   vMul<T>(_n,x._X,y._X,_X);
};
;

/// normalize the vector to unit l2-norm (no-op if the norm is <= EPSILON)
template <typename T> inline void Vector<T>::normalize() {
   T norm=nrm2();
   if (norm > EPSILON) scal(1.0/norm);
};

/// project onto the unit l2 ball (scales only when the norm exceeds 1)
template <typename T> inline void Vector<T>::normalize2() {
   T norm=nrm2();
   if (norm > T(1.0)) scal(1.0/norm);
};

/// whiten: remove the mean.  If pattern is true the vector is treated as a
/// sqrt(_n) x sqrt(_n) patch and one mean per 2x2 Bayer-like position is
/// removed (stored in meanv[0..3]); otherwise one mean per one of meanv.n()
/// contiguous sub-patches is removed and stored in meanv.
template <typename T> inline void Vector<T>::whiten(
      Vector<T>& meanv, const bool pattern) {
   if (pattern) {
      const int n =static_cast<int>(sqrt(static_cast<T>(_n)));
      int count[4];
      for (int i = 0; i<4; ++i) count[i]=0;
      int offsetx=0;
      for (int j = 0; j<n; ++j) {
         offsetx= (offsetx+1) % 2;
         int offsety=0;
         for (int k = 0; k<n; ++k) {
            offsety= (offsety+1) % 2;
            meanv[2*offsetx+offsety]+=_X[j*n+k];
            count[2*offsetx+offsety]++;
         }
      }
      for (int i = 0; i<4; ++i) meanv[i] /= count[i];
      offsetx=0;
      for (int j = 0; j<n; ++j) {
         offsetx= (offsetx+1) % 2;
         int offsety=0;
         for (int k = 0; k<n; ++k) {
            offsety= (offsety+1) % 2;
            _X[j*n+k]-=meanv[2*offsetx+offsety];
         }
      }
   } else {
      const int V = meanv.n();
      const int sizePatch=_n/V;
      for (int j = 0; j<V; ++j) {
         T mean = 0;
         for (int k = 0; k<sizePatch; ++k) {
            mean+=_X[sizePatch*j+k];
         }
         mean /= sizePatch;
         for (int k = 0; k<sizePatch; ++k) {
            _X[sizePatch*j+k]-=mean;
         }
         meanv[j]=mean;
      }
   }
};

/// whiten restricted to a mask: the mean of each sub-patch is normalized by
/// the number of active mask entries and only subtracted where mask is set.
/// NOTE(review): assumes mask entries are 0/1 (uses asum as a count).
template <typename T> inline void Vector<T>::whiten(
      Vector<T>& meanv, const Vector<T>& mask) {
   const int V = meanv.n();
   const int sizePatch=_n/V;
   for (int j = 0; j<V; ++j) {
      T mean = 0;
      for (int k = 0; k<sizePatch; ++k) {
         mean+=_X[sizePatch*j+k];
      }
      mean /= cblas_asum(sizePatch,mask._X+j*sizePatch,1);
      for (int k = 0; k<sizePatch; ++k) {
         if (mask[sizePatch*j+k]) _X[sizePatch*j+k]-=mean;
      }
      meanv[j]=mean;
   }
};

/// whiten: remove the mean of each of the V contiguous sub-patches
template <typename T> inline void Vector<T>::whiten(const int V) {
   const int sizePatch=_n/V;
   for (int j = 0; j<V; ++j) {
      T mean = 0;
      for (int k = 0; k<sizePatch; ++k) {
         mean+=_X[sizePatch*j+k];
      }
      mean /= sizePatch;
      for (int k = 0; k<sizePatch; ++k) {
         _X[sizePatch*j+k]-=mean;
      }
   }
};

/// Kullback-Leibler-style divergence between *this and Y; entries of *this
/// below 1e-20 are ignored, near-zero entries of Y are penalized with 1e200.
template <typename T> inline T Vector<T>::KL(const Vector<T>& Y) {
   T sum = 0;
   T* prY = Y.rawX();
   // Y.print("Y");
   // this->print("X");
   // stop();
   for (int i = 0; i<_n; ++i) {
      if (_X[i] > 1e-20) {
         if (prY[i] < 1e-60) {
            sum += 1e200;
         } else {
            sum += _X[i]*log_alt<T>(_X[i]/prY[i]);
         }
         //sum += _X[i]*log_alt<T>(_X[i]/(prY[i]+1e-100));
      }
   }
   sum += T(-1.0) + Y.sum();
   return sum;
};

/// unwhiten: add the means removed by the matching whiten() overload back
template <typename T> inline void Vector<T>::unwhiten(
      Vector<T>& meanv, const bool pattern) {
   if (pattern) {
      const int n =static_cast<int>(sqrt(static_cast<T>(_n)));
      int offsetx=0;
      for (int j = 0; j<n; ++j) {
         offsetx= (offsetx+1) % 2;
         int offsety=0;
         for (int k = 0; k<n; ++k) {
            offsety= (offsety+1) % 2;
            _X[j*n+k]+=meanv[2*offsetx+offsety];
         }
      }
   } else {
      const int V = meanv.n();
      const int sizePatch=_n/V;
      for (int j = 0; j<V; ++j) {
         T mean = meanv[j];
         for (int k = 0; k<sizePatch; ++k) {
            _X[sizePatch*j+k]+=mean;
         }
      }
   }
};

/// return the mean
template <typename T> inline T Vector<T>::mean() {
   return this->sum()/_n;
}

/// return the (population) standard deviation
template <typename T> inline T Vector<T>::std() {
   T E = this->mean();
   T std=0;
   for (int i = 0; i<_n; ++i) {
      T tmp=_X[i]-E;
      std += tmp*tmp;
   }
   std /= _n;
   return sqr_alt<T>(std);
}

/// scale the vector by a
template <typename T> inline void Vector<T>::scal(const T a) {
   return cblas_scal<T>(_n,a,_X,1);
};

/// A <- -A
template <typename T> inline void Vector<T>::neg() {
   for (int i = 0; i<_n; ++i) _X[i]=-_X[i];
};

/// replace each value by its exponential
template <typename T> inline void Vector<T>::exp() {
   vExp<T>(_n,_X,_X);
};

/// replace each value by its logarithm
template <typename T> inline void Vector<T>::log() {
   for (int i=0; i<_n; ++i) _X[i]=alt_log<T>(_X[i]);
};

/// replace each value x by log(1+exp(x)), with saturation outside [-30,30]
/// NOTE(review): values >= 30 are intentionally left unchanged (log(1+e^x)~x).
template <typename T> inline void Vector<T>::logexp() {
   for (int i = 0; i<_n; ++i) {
      if (_X[i] < -30) {
         _X[i]=0;
      } else if (_X[i] < 30) {
         _X[i]= alt_log<T>( T(1.0) + exp_alt<T>( _X[i] ) );
      }
   }
};

/// numerically-guarded log-sum-exp relative to coordinate y; destroys *this
template <typename T> inline T Vector<T>::softmax(const int y) {
   this->add(-_X[y]);
   _X[y]=-INFINITY;
   T max=this->maxval();
   if (max > 30) {
      return max;
   } else if (max < -30) {
      return 0;
   } else {
      _X[y]=T(0.0);
      this->exp();
      return alt_log<T>(this->sum());
   }
};

/// computes the sum of the magnitudes of the vector
template <typename T> inline T Vector<T>::asum() const {
   return cblas_asum<T>(_n,_X,1);
};

/// l0 pseudo-norm (number of non-zero entries), returned as T
template <typename T> inline T Vector<T>::lzero() const {
   int count=0;
   for (int i = 0; i<_n; ++i)
      if (_X[i] != 0) ++count;
   return count;
};

/// total variation of the vector: sum_i |X[i]-X[i-1]|
template <typename T> inline T Vector<T>::afused() const {
   T sum = 0;
   for (int i = 1; i<_n; ++i) {
      sum += abs<T>(_X[i]-_X[i-1]);
   }
   return sum;
}

/// returns the sum of the vector
template <typename T> inline T Vector<T>::sum() const {
   T sum=T();
   for (int i = 0; i<_n; ++i) sum +=_X[i];
   return sum;
};

/// puts in signs, the sign of each point in the vector (0 maps to 0)
template <typename T> inline void Vector<T>::sign(Vector<T>& signs) const {
   T* prSign=signs.rawX();
   for (int i = 0; i<_n; ++i) {
      if (_X[i] == 0) {
         prSign[i]=0.0;
      } else {
         prSign[i] = _X[i] > 0 ? 1.0 : -1.0;
      }
   }
};

/// projects the vector onto the l1 ball of radius thrs (onto the simplex when
/// simplex is true), using a pivot-based partition to find the threshold.
/// returns true if the returned vector is null
template <typename T> inline void Vector<T>::l1project(Vector<T>& out,
      const T thrs, const bool simplex) const {
   out.copy(*this);
   if (simplex) {
      out.thrsPos();
   } else {
      vAbs<T>(_n,out._X,out._X);
   }
   T norm1 = out.sum();
   if (norm1 <= thrs) {
      if (!simplex) out.copy(*this);
      return;
   }
   T* prU = out._X;
   int sizeU = _n;
   T sum = T();
   int sum_card = 0;
   while (sizeU > 0) {
      // put the pivot in prU[0]
      swap(prU[0],prU[sizeU/2]);
      T pivot = prU[0];
      int sizeG=1;
      T sumG=pivot;
      for (int i = 1; i<sizeU; ++i) {
         if (prU[i] >= pivot) {
            sumG += prU[i];
            swap(prU[sizeG++],prU[i]);
         }
      }
      if (sum + sumG - pivot*(sum_card + sizeG) <= thrs) {
         sum_card += sizeG;
         sum += sumG;
         prU +=sizeG;
         sizeU -= sizeG;
      } else {
         ++prU;
         sizeU = sizeG-1;
      }
   }
   T lambda = (sum-thrs)/sum_card;
   out.copy(*this);
   if (simplex) {
      out.thrsPos();
   }
   out.softThrshold(lambda);
};

/// projects the vector onto the weighted l1 ball of radius thrs;
/// if residual is true, out receives the residual b - projection instead.
/// returns true if the returned vector is null
template <typename T> inline void Vector<T>::l1project_weighted(Vector<T>& out,
      const Vector<T>& weights, const T thrs, const bool residual) const {
   out.copy(*this);
   if (thrs==0) {
      out.setZeros();
      return;
   }
   vAbs<T>(_n,out._X,out._X);
   out.div(weights);
   Vector<int> keys(_n);
   for (int i = 0; i<_n; ++i) keys[i]=i;
   out.sort2(keys,false);
   T sum1=0;
   T sum2=0;
   T lambda=0;
   for (int i = 0; i<_n; ++i) {
      const T lambda_old=lambda;
      const T fact=weights[keys[i]]*weights[keys[i]];
      lambda=out[i];
      sum2 += fact;
      sum1 += fact*lambda;
      if (sum1 - lambda*sum2 >= thrs) {
         sum2-=fact;
         sum1-=fact*lambda;
         lambda=lambda_old;
         break;
      }
   }
   lambda=MAX(0,(sum1-thrs)/sum2);
   if (residual) {
      for (int i = 0; i<_n; ++i) {
         out._X[i]=_X[i] > 0 ? MIN(_X[i],lambda*weights[i]) : MAX(_X[i],-lambda*weights[i]);
      }
   } else {
      for (int i = 0; i<_n; ++i) {
         out._X[i]=_X[i] > 0 ?
            MAX(0,_X[i]-lambda*weights[i]) : MIN(0,_X[i]+lambda*weights[i]);
      }
   }
};

/// iterative thresholding projection (binary labels y)
/// NOTE(review): unqualified abs() -- relies on a floating-point overload
/// being in scope (abs<T> is used elsewhere in this file); confirm there is
/// no int-abs truncation for T=double/float.
template <typename T> inline void Vector<T>::project_sft_binary(const Vector<T>& y) {
   T mean = this->mean();
   T thrs=mean;
   while (abs(mean) > EPSILON) {
      int n_seuils=0;
      for (int i = 0; i< _n; ++i) {
         _X[i] = _X[i]-thrs;
         const T val = y[i]*_X[i];
         if (val > 0) {
            ++n_seuils;
            _X[i]=0;
         } else if (val < -1.0) {
            ++n_seuils;
            _X[i] = -y[i];
         }
      }
      mean = this->mean();
      thrs= mean * _n/(_n-n_seuils);
   }
};

/// iterative thresholding projection (multi-class labels, class 'clas')
template <typename T> inline void Vector<T>::project_sft(const Vector<int>& labels,
      const int clas) {
   T mean = this->mean();
   T thrs=mean;
   while (abs(mean) > EPSILON) {
      int n_seuils=0;
      for (int i = 0; i< _n; ++i) {
         _X[i] = _X[i]-thrs;
         if (labels[i]==clas) {
            if (_X[i] < -1.0) {
               _X[i]=-1.0;
               ++n_seuils;
            }
         } else {
            if (_X[i] < 0) {
               ++n_seuils;
               _X[i]=0;
            }
         }
      }
      mean = this->mean();
      thrs= mean * _n/(_n-n_seuils);
   }
};

/// dispatcher over several sparse projection/proximal problems selected by
/// 'mode'; see the per-branch formulations below.  May modify *this
/// temporarily (it is rescaled back) in mode 2.
template <typename T> inline void Vector<T>::sparseProject(Vector<T>& out,
      const T thrs, const int mode, const T lambda1,
      const T lambda2, const T lambda3, const bool pos) {
   if (mode == 1) {
      /// min_u ||b-u||_2^2 / ||u||_1 <= thrs
      this->l1project(out,thrs,pos);
   } else if (mode == 2) {
      /// min_u ||b-u||_2^2 / ||u||_2^2 + lambda1||u||_1 <= thrs
      if (lambda1 > 1e-10) {
         this->scal(lambda1);
         this->l1l2project(out,thrs,2.0/(lambda1*lambda1),pos);
         this->scal(T(1.0/lambda1));
         out.scal(T(1.0/lambda1));
      } else {
         out.copy(*this);
         out.normalize2();
         out.scal(sqrt(thrs));
      }
   } else if (mode == 3) {
      /// min_u ||b-u||_2^2 / ||u||_1 + (lambda1/2) ||u||_2^2 <= thrs
      this->l1l2project(out,thrs,lambda1,pos);
   } else if (mode == 4) {
      /// min_u 0.5||b-u||_2^2 + lambda1||u||_1 / ||u||_2^2 <= thrs
      out.copy(*this);
      if (pos) out.thrsPos();
      out.softThrshold(lambda1);
      T nrm=out.nrm2sq();
      if (nrm > thrs) out.scal(sqr_alt<T>(thrs/nrm));
   } else if (mode == 5) {
      /// min_u 0.5||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) / ||u||_2^2 <= thrs
//      this->fusedProject(out,lambda1,lambda2,100);
//      T nrm=out.nrm2sq();
//      if (nrm > thrs)
//         out.scal(sqr_alt<T>(thrs/nrm));
//   } else if (mode == 6) {
      /// min_u 0.5||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) +0.5lambda_3 ||u||_2^2
      this->fusedProjectHomotopy(out,lambda1,lambda2,lambda3,true);
   } else if (mode==6) {
      /// min_u ||b-u||_2^2 / lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 <= thrs
      this->fusedProjectHomotopy(out,lambda1/thrs,lambda2/thrs,lambda3/thrs,false);
   } else {
      /// min_u ||b-u||_2^2 / (1-lambda1)*||u||_2^2 + lambda1||u||_1 <= thrs
      if (lambda1 < 1e-10) {
         out.copy(*this);
         if (pos) out.thrsPos();
         out.normalize2();
         out.scal(sqrt(thrs));
      } else if (lambda1 > 0.999999) {
         this->l1project(out,thrs,pos);
      } else {
         this->sparseProject(out,thrs/(1.0-lambda1),2,lambda1/(1-lambda1),0,0,pos);
      }
   }
};

/// returns true if the returned vector is null
template <typename T> inline void Vector<T>::l1l2projectb(Vector<T>& out, const T thrs,
      const T gamma, const bool pos, const int mode) {
   if (mode == 1) {
      /// min_u ||b-u||_2^2 / ||u||_2^2 + gamma ||u||_1 <= thrs
      this->scal(gamma);
      this->l1l2project(out,thrs,2.0/(gamma*gamma),pos);
      this->scal(T(1.0/gamma));
      out.scal(T(1.0/gamma));
   } else if (mode == 2) {
      /// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs
      this->l1l2project(out,thrs,gamma,pos);
   } else if (mode == 3) {
      /// min_u 0.5||b-u||_2^2 + gamma||u||_1 / ||u||_2^2 <= thrs
      out.copy(*this);
      if (pos) out.thrsPos();
      out.softThrshold(gamma);
      T nrm=out.nrm2();
      if (nrm > thrs) out.scal(thrs/nrm);
   }
}

/// returns true if the returned vector is null
/// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs
/// (elastic-net-ball projection via pivot partitioning, gamma=0 reduces to
/// the plain l1 projection)
template <typename T> inline void Vector<T>::l1l2project(Vector<T>& out, const T thrs,
      const T gamma, const bool pos) const {
   if (gamma == 0)
      return this->l1project(out,thrs,pos);
   out.copy(*this);
   if (pos) {
      out.thrsPos();
   } else {
      vAbs<T>(_n,out._X,out._X);
   }
   T norm = out.sum() + gamma*out.nrm2sq();
   if (norm <= thrs) {
      if (!pos) out.copy(*this);
      return;
   }
   /// BEGIN
   T* prU = out._X;
   int sizeU = _n;
   T sum = 0;
   int sum_card = 0;
   while (sizeU > 0) {
      // put the pivot in prU[0]
      swap(prU[0],prU[sizeU/2]);
      T pivot = prU[0];
      int sizeG=1;
      T sumG=pivot+0.5*gamma*pivot*pivot;
      for (int i = 1; i<sizeU; ++i) {
         if (prU[i] >= pivot) {
            sumG += prU[i]+0.5*gamma*prU[i]*prU[i];
            swap(prU[sizeG++],prU[i]);
         }
      }
      if (sum + sumG - pivot*(1+0.5*gamma*pivot)*(sum_card + sizeG) <
            thrs*(1+gamma*pivot)*(1+gamma*pivot)) {
         sum_card += sizeG;
         sum += sumG;
         prU +=sizeG;
         sizeU -= sizeG;
      } else {
         ++prU;
         sizeU = sizeG-1;
      }
   }
   T a = gamma*gamma*thrs+0.5*gamma*sum_card;
   T b = 2*gamma*thrs+sum_card;
   T c=thrs-sum;
   T delta = b*b-4*a*c;
   T lambda = (-b+sqrt(delta))/(2*a);
   out.copy(*this);
   if (pos) {
      out.thrsPos();
   }
   out.softThrshold(lambda);
   out.scal(T(1.0/(1+lambda*gamma)));
};

/// helper for the fused-lasso homotopy: contribution of a breakpoint given
/// the signs of the three neighbouring segments
template <typename T>
static inline T fusedHomotopyAux(const bool& sign1,
      const bool& sign2, const bool& sign3, const T& c1, const T& c2) {
   if (sign1) {
      if (sign2) {
         return sign3 ? 0 : c2;
      } else {
         return sign3 ? -c2-c1 : -c1;
      }
   } else {
      if (sign2) {
         return sign3 ? c1 : c1+c2;
      } else {
         return sign3 ?
            -c2 : 0;
      }
   }
};

/// fused-lasso proximal/projection solver via a simplified LARS homotopy.
/// penalty=true solves min_u ||b-u||_2^2 + lambda1||u||_1 + lambda2 Fused(u)
/// + 0.5 lambda3||u||_2^2; penalty=false solves the constrained counterpart.
/// CAUTION: *this (the residual DtR) is modified in place.
template <typename T>
inline void Vector<T>::fusedProjectHomotopy(Vector<T>& alpha,
      const T lambda1,const T lambda2,const T lambda3,
      const bool penalty) {
   T* pr_DtR=_X;
   const int K = _n;
   alpha.setZeros();
   Vector<T> u(K); // regularization path for gamma
   Vector<T> Du(K); // regularization path for alpha
   Vector<T> DDu(K); // regularization path for alpha
   Vector<T> gamma(K); // auxiliary variable
   Vector<T> c(K); // auxiliary variables
   Vector<T> scores(K); // auxiliary variables
   gamma.setZeros();
   T* pr_gamma = gamma.rawX();
   T* pr_u = u.rawX();
   T* pr_Du = Du.rawX();
   T* pr_DDu = DDu.rawX();
   T* pr_c = c.rawX();
   T* pr_scores = scores.rawX();
   Vector<int> ind(K+1);
   Vector<bool> signs(K);
   ind.set(K);
   int* pr_ind = ind.rawX();
   bool* pr_signs = signs.rawX();
   /// Computation of DtR
   T sumBeta = this->sum();
   /// first element is selected, gamma and alpha are updated
   pr_gamma[0]=sumBeta/K;
   /// update alpha
   alpha.set(pr_gamma[0]);
   /// update DtR
   this->sub(alpha);
   for (int j = K-2; j>=0; --j)
      pr_DtR[j] += pr_DtR[j+1];
   pr_DtR[0]=0;
   pr_ind[0]=0;
   pr_signs[0] = pr_DtR[0] > 0;
   pr_c[0]=T(1.0)/K;
   int currentInd=this->fmax();
   T currentLambda=abs<T>(pr_DtR[currentInd]);
   bool newAtom = true;
   /// Solve the Lasso using simplified LARS
   for (int i = 1; i<K; ++i) {
      /// exit if constraints are satisfied
      /// min_u ||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2
      if (penalty && currentLambda <= lambda2) break;
      if (!penalty) {
         /// min_u ||b-u||_2^2 / lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 <= 1.0
         scores.copy(alpha);
         scores.softThrshold(lambda1*currentLambda/lambda2);
         scores.scal(T(1.0/(1.0+lambda3*currentLambda/lambda2)));
         if (lambda1*scores.asum()+lambda2*scores.afused()+0.5*
               lambda3*scores.nrm2sq() >= T(1.0)) break;
      }
      /// Update pr_ind and pr_c
      if (newAtom) {
         int j;
         for (j = 1; j<i; ++j)
            if (pr_ind[j] > currentInd) break;
         for (int k = i; k>j; --k) {
            pr_ind[k]=pr_ind[k-1];
            pr_c[k]=pr_c[k-1];
            pr_signs[k]=pr_signs[k-1];
         }
         pr_ind[j]=currentInd;
         pr_signs[j]=pr_DtR[currentInd] > 0;
         pr_c[j-1]=T(1.0)/(pr_ind[j]-pr_ind[j-1]);
         pr_c[j]=T(1.0)/(pr_ind[j+1]-pr_ind[j]);
      }
      // Compute u
      pr_u[0]= pr_signs[1] ? -pr_c[0] : pr_c[0];
      if (i == 1) {
         pr_u[1]=pr_signs[1] ? pr_c[0]+pr_c[1] : -pr_c[0]-pr_c[1];
      } else {
         pr_u[1]=pr_signs[1] ? pr_c[0]+pr_c[1] : -pr_c[0]-pr_c[1];
         pr_u[1]+=pr_signs[2] ? -pr_c[1] : pr_c[1];
         for (int j = 2; j<i; ++j) {
            pr_u[j]=2*fusedHomotopyAux<T>(pr_signs[j-1],
                  pr_signs[j],pr_signs[j+1],
                  pr_c[j-1],pr_c[j]);
         }
         pr_u[i] = pr_signs[i-1] ? -pr_c[i-1] : pr_c[i-1];
         pr_u[i] += pr_signs[i] ? pr_c[i-1]+pr_c[i] : -pr_c[i-1]-pr_c[i];
      }
      // Compute Du
      pr_Du[0]=pr_u[0];
      for (int k = 1; k<pr_ind[1]; ++k)
         pr_Du[k]=pr_Du[0];
      for (int j = 1; j<=i; ++j) {
         pr_Du[pr_ind[j]]=pr_Du[pr_ind[j]-1]+pr_u[j];
         for (int k = pr_ind[j]+1; k<pr_ind[j+1]; ++k)
            pr_Du[k]=pr_Du[pr_ind[j]];
      }
      /// Compute DDu
      DDu.copy(Du);
      for (int j = K-2; j>=0; --j)
         pr_DDu[j] += pr_DDu[j+1];
      /// Check constraints
      T max_step1 = INFINITY;
      if (penalty) {
         max_step1 = currentLambda-lambda2;
      }
      /// Check changes of sign
      T max_step2 = INFINITY;
      int step_out = -1;
      for (int j = 1; j<=i; ++j) {
         T ratio = -pr_gamma[pr_ind[j]]/pr_u[j];
         if (ratio > 0 && ratio <= max_step2) {
            max_step2=ratio;
            step_out=j;
         }
      }
      T max_step3 = INFINITY;
      /// Check new variables entering the active set
      for (int j = 1; j<K; ++j) {
         T sc1 = (currentLambda-pr_DtR[j])/(T(1.0)-pr_DDu[j]);
         T sc2 = (currentLambda+pr_DtR[j])/(T(1.0)+pr_DDu[j]);
         if (sc1 <= 1e-10) sc1=INFINITY;
         if (sc2 <= 1e-10) sc2=INFINITY;
         pr_scores[j]= MIN(sc1,sc2);
      }
      for (int j = 0; j<=i; ++j) {
         pr_scores[pr_ind[j]]=INFINITY;
      }
      currentInd = scores.fmin();
      max_step3 = pr_scores[currentInd];
      T step = MIN(max_step1,MIN(max_step3,max_step2));
      if (step == 0 || step == INFINITY) break;
      /// Update gamma, alpha, DtR, currentLambda
      for (int j = 0; j<=i; ++j) {
         pr_gamma[pr_ind[j]]+=step*pr_u[j];
      }
      alpha.add(Du,step);
      this->add(DDu,-step);
      currentLambda -= step;
      if (step == max_step2) {
         /// Update signs,pr_ind, pr_c
         for (int k = step_out; k<=i; ++k)
            pr_ind[k]=pr_ind[k+1];
         pr_ind[i]=K;
         for (int k = step_out; k<=i; ++k)
            pr_signs[k]=pr_signs[k+1];
         pr_c[step_out-1]=T(1.0)/(pr_ind[step_out]-pr_ind[step_out-1]);
         pr_c[step_out]=T(1.0)/(pr_ind[step_out+1]-pr_ind[step_out]);
         i-=2;
         newAtom=false;
      } else {
         newAtom=true;
      }
   }
   if (penalty) {
      alpha.softThrshold(lambda1);
      alpha.scal(T(1.0/(1.0+lambda3)));
   } else {
      alpha.softThrshold(lambda1*currentLambda/lambda2);
      alpha.scal(T(1.0/(1.0+lambda3*currentLambda/lambda2)));
   }
};

/// iterative coordinate-descent solver for the fused lasso proximal operator.
/// CAUTION: *this (beta) is modified in place (cumulative sums).
/// NOTE(review): the inner 'gamma_old'/'diff' shadow the outer variables of
/// the same name -- intentional per-coordinate state, but worth confirming.
template <typename T>
inline void Vector<T>::fusedProject(Vector<T>& alpha, const T lambda1, const T lambda2,
      const int itermax) {
   T* pr_alpha= alpha.rawX();
   T* pr_beta=_X;
   const int K = alpha.n();
   T total_alpha =alpha.sum();
   /// Modification of beta
   for (int i = K-2; i>=0; --i)
      pr_beta[i]+=pr_beta[i+1];
   for (int i = 0; i<itermax; ++i) {
      T sum_alpha=0;
      T sum_diff = 0;
      /// Update first coordinate
      T gamma_old=pr_alpha[0];
      pr_alpha[0]=(K*gamma_old+pr_beta[0]-
            total_alpha)/K;
      T diff = pr_alpha[0]-gamma_old;
      sum_diff += diff;
      sum_alpha += pr_alpha[0];
      total_alpha +=K*diff;
      /// Update alpha_j
      for (int j = 1; j<K; ++j) {
         pr_alpha[j]+=sum_diff;
         T gamma_old=pr_alpha[j]-pr_alpha[j-1];
         T gamma_new=softThrs((K-j)*gamma_old+pr_beta[j]-
               (total_alpha-sum_alpha),lambda2)/(K-j);
         pr_alpha[j]=pr_alpha[j-1]+gamma_new;
         T diff = gamma_new-gamma_old;
         sum_diff += diff;
         sum_alpha+=pr_alpha[j];
         total_alpha +=(K-j)*diff;
      }
   }
   alpha.softThrshold(lambda1);
};

/// sort the vector in place (mode=true: increasing, false: decreasing)
template <typename T> inline void Vector<T>::sort(const bool mode) {
   if (mode) {
      lasrt<T>(incr,_n,_X);
   } else {
      lasrt<T>(decr,_n,_X);
   }
};

/// sort the vector into out, leaving *this untouched
template <typename T> inline void Vector<T>::sort(Vector<T>& out, const bool mode) const {
   out.copy(*this);
   out.sort(mode);
};

/// sort the vector in place, permuting key the same way
template <typename T> inline void Vector<T>::sort2(Vector<int>& key, const bool mode) {
   quick_sort(key.rawX(),_X,0,_n-1,mode);
};

/// sort a copy into out, permuting key the same way
template <typename T>
inline void Vector<T>::sort2(Vector<T>& out, Vector<int>& key, const bool mode) const {
   out.copy(*this);
   out.sort2(key,mode);
}

/// zero out entries according to one of four Bayer colour-filter layouts;
/// the vector is interpreted as three stacked n x n channels (R,G,B) with
/// n = sqrt(_n/3), and 'offset' selects the pattern phase.
template <typename T>
inline void Vector<T>::applyBayerPattern(const int offset) {
   int sizePatch=_n/3;
   int n = static_cast<int>(sqrt(static_cast<T>(sizePatch)));
   if (offset == 0) {
      // R
      for (int i = 0; i<n; ++i) {
         const int step = (i % 2) ? 1 : 2;
         const int off = (i % 2) ? 0 : 1;
         for (int j = off; j<n; j+=step) {
            _X[i*n+j]=0;
         }
      }
      // G
      for (int i = 0; i<n; ++i) {
         const int step = 2;
         const int off = (i % 2) ? 1 : 0;
         for (int j = off; j<n; j+=step) {
            _X[sizePatch+i*n+j]=0;
         }
      }
      // B
      for (int i = 0; i<n; ++i) {
         const int step = (i % 2) ? 2 : 1;
         const int off = 0;
         for (int j = off; j<n; j+=step) {
            _X[2*sizePatch+i*n+j]=0;
         }
      }
   } else if (offset == 1) {
      // R
      for (int i = 0; i<n; ++i) {
         const int step = (i % 2) ? 2 : 1;
         const int off = (i % 2) ? 1 : 0;
         for (int j = off; j<n; j+=step) {
            _X[i*n+j]=0;
         }
      }
      // G
      for (int i = 0; i<n; ++i) {
         const int step = 2;
         const int off = (i % 2) ? 0 : 1;
         for (int j = off; j<n; j+=step) {
            _X[sizePatch+i*n+j]=0;
         }
      }
      // B
      for (int i = 0; i<n; ++i) {
         const int step = (i % 2) ? 1 : 2;
         const int off = 0;
         for (int j = off; j<n; j+=step) {
            _X[2*sizePatch+i*n+j]=0;
         }
      }
   } else if (offset == 2) {
      // R
      for (int i = 0; i<n; ++i) {
         const int step = (i % 2) ? 1 : 2;
         const int off = 0;
         for (int j = off; j<n; j+=step) {
            _X[i*n+j]=0;
         }
      }
      // G
      for (int i = 0; i<n; ++i) {
         const int step = 2;
         const int off = (i % 2) ? 0 : 1;
         for (int j = off; j<n; j+=step) {
            _X[sizePatch+i*n+j]=0;
         }
      }
      // B
      for (int i = 0; i<n; ++i) {
         const int step = (i % 2) ? 2 : 1;
         const int off = (i % 2) ? 1 : 0;
         for (int j = off; j<n; j+=step) {
            _X[2*sizePatch+i*n+j]=0;
         }
      }
   } else if (offset == 3) {
      // R
      for (int i = 0; i<n; ++i) {
         const int step = (i % 2) ? 2 : 1;
         const int off = 0;
         for (int j = off; j<n; j+=step) {
            _X[i*n+j]=0;
         }
      }
      // G
      for (int i = 0; i<n; ++i) {
         const int step = 2;
         const int off = (i % 2) ? 1 : 0;
         for (int j = off; j<n; j+=step) {
            _X[sizePatch+i*n+j]=0;
         }
      }
      // B
      for (int i = 0; i<n; ++i) {
         const int step = (i % 2) ? 1 : 2;
         const int off = (i % 2) ? 0 : 1;
         for (int j = off; j<n; j+=step) {
            _X[2*sizePatch+i*n+j]=0;
         }
      }
   }
};

/// make a sparse copy
/// NOTE(review): assumes vec was pre-allocated large enough to hold all
/// non-zeros of *this -- no bound check is performed here.
template <typename T> inline void Vector<T>::toSparse(
      SpVector<T>& vec) const {
   int L=0;
   T* v = vec._v;
   int* r = vec._r;
   for (int i = 0; i<_n; ++i) {
      if (_X[i] != T()) {
         v[L]=_X[i];
         r[L++]=i;
      }
   }
   vec._L=L;
};

/// copy into out only the entries where mask is true; out is shrunk to the
/// number of selected entries
template <typename T>
inline void Vector<T>::copyMask(Vector<T>& out, Vector<bool>& mask) const {
   out.resize(_n);
   int pointer=0;
   for (int i = 0; i<_n; ++i) {
      if (mask[i]) out[pointer++]=_X[i];
   }
   out.setn(pointer);
};

/// row-mask a matrix: keep in out only the rows where mask is true
template <typename T>
inline void Matrix<T>::copyMask(Matrix<T>& out, Vector<bool>& mask) const {
   out.resize(_m,_n);
   int count=0;
   for (int i = 0; i<mask.n(); ++i)
      if (mask[i]) ++count;
   out.setm(count);
   for (int i = 0; i<_n; ++i) {
      int pointer=0;
      for (int j = 0; j<_m; ++j) {
         if (mask[j]) {
            out[i*count+pointer]=_X[i*_m+j];
            ++pointer;
         }
      }
   }
};

/* ****************************
 * Implementation of SpMatrix 
 * ****************************/

/// Constructor, CSC format, existing data (no ownership taken)
template <typename T> SpMatrix<T>::SpMatrix(T* v, int* r, int* pB, int* pE,
      int m, int n, int nzmax) :
   _externAlloc(true), _v(v), _r(r), _pB(pB), _pE(pE),
   _m(m), _n(n), _nzmax(nzmax) { };

/// Constructor, new m x n matrix, with at most nzmax non-zeros values
template <typename T> SpMatrix<T>::SpMatrix(int m, int n, int nzmax) :
   _externAlloc(false), _m(m), _n(n), _nzmax(nzmax) {
#pragma omp critical
   {
      _v=new T[nzmax];
      _r=new int[nzmax];
      _pB=new int[_n+1];
   }
   _pE=_pB+1;
};

/// Empty constructor
template <typename T> SpMatrix<T>::SpMatrix() :
   _externAlloc(true), _v(NULL), _r(NULL), _pB(NULL), _pE(NULL),
   _m(0),_n(0),_nzmax(0) { };

/// deep copy of a sparse matrix (values, row indices and column pointers)
template <typename T>
inline void SpMatrix<T>::copy(const SpMatrix<T>& mat) {
   this->resize(mat._m,mat._n,mat._nzmax);
   memcpy(_v,mat._v,_nzmax*sizeof(T));
   memcpy(_r,mat._r,_nzmax*sizeof(int));
   memcpy(_pB,mat._pB,(_n+1)*sizeof(int));
}

/// Destructor
template <typename T> SpMatrix<T>::~SpMatrix() {
   clear();
};

/// reference the column i into vec (no copy; vec aliases this matrix)
template <typename T> inline void SpMatrix<T>::refCol(int i,
      SpVector<T>& vec) const {
   if (vec._nzmax > 0) vec.clear();
   vec._v=_v+_pB[i];
   vec._r=_r+_pB[i];
   vec._externAlloc=true;
   vec._L=_pE[i]-_pB[i];
   vec._nzmax=vec._L;
};

/// print the sparse matrix (to std::cerr, despite the historical comment)
template<typename T> inline void SpMatrix<T>::print(const string& name) const {
   cerr << name << endl;
   cerr << _m << " x " << _n << " , " << _nzmax << endl;
   for (int i = 0; i<_n; ++i) {
      for (int j = _pB[i]; j<_pE[i]; ++j) {
         cerr << "(" <<_r[j] << "," << i << ") = " << _v[j] << endl;
      }
   }
};

/// dense-style element access by linear (column-major) index; O(nnz in column)
template<typename T>
inline T SpMatrix<T>::operator[](const int index) const {
   const int num_col=(index/_m);
   const int num_row=index -num_col*_m;
   T val = 0;
   for (int j = _pB[num_col]; j<_pB[num_col+1]; ++j) {
      if (_r[j]==num_row) {
         val=_v[j];
         break;
      }
   }
   return val;
};

/// densify column 'index' into data
template<typename T>
void SpMatrix<T>::getData(Vector<T>& data, const int index) const {
   data.resize(_m);
   data.setZeros();
   for (int i = _pB[index]; i< _pB[index+1]; ++i)
      data[_r[i]]=_v[i];
};

/// densify the columns listed in groups[i] into data (one column each)
template <typename T>
void SpMatrix<T>::getGroup(Matrix<T>& data, const vector_groups& groups,
      const int i) const {
   const group& gr = groups[i];
   const int N = gr.size();
   data.resize(_m,N);
   int count=0;
   Vector<T> col;
   for (group::const_iterator it = gr.begin(); it != gr.end(); ++it) {
      data.refCol(count,col);
      this->getData(col,*it);
      ++count;
   }
};

/// compute the sum of the magnitudes of the matrix elements
template <typename T> inline T SpMatrix<T>::asum() const {
   return cblas_asum<T>(_pB[_n],_v,1);
};

/// compute the squared Frobenius norm
template <typename T> inline T SpMatrix<T>::normFsq() const {
   return cblas_dot<T>(_pB[_n],_v,1,_v,1);
};

/// value array update: _v <- _v + a*mat._v (assumes identical sparsity)
template <typename T>
inline void SpMatrix<T>::add_direct(const SpMatrix<T>& mat, const T a) {
   Vector<T> v2(mat._v,mat._nzmax);
   Vector<T> v1(_v,_nzmax);
   v1.add(v2,a);
}

/// value array copy (assumes identical sparsity pattern)
template <typename T>
inline void SpMatrix<T>::copy_direct(const SpMatrix<T>& mat) {
   Vector<T> v2(mat._v,_pB[_n]);
   Vector<T> v1(_v,_pB[_n]);
   v1.copy(v2);
}

/// dot product of the value arrays (assumes identical sparsity pattern)
template <typename T> inline T
SpMatrix<T>::dot_direct(const SpMatrix<T>& mat) const {
   Vector<T> v2(mat._v,_pB[_n]);
   Vector<T> v1(_v,_pB[_n]);
   return v1.dot(v2);
}

/// clear the matrix (frees storage only if this matrix owns it)
template <typename T> inline void SpMatrix<T>::clear() {
   if (!_externAlloc) {
      delete[](_r);
      delete[](_v);
      delete[](_pB);
   }
   _n=0;
   _m=0;
   _nzmax=0;
   _v=NULL;
   _r=NULL;
   _pB=NULL;
   _pE=NULL;
   _externAlloc=true;
};

/// resize the matrix; column pointers are zeroed (empty columns)
template <typename T>
inline void SpMatrix<T>::resize(const int m,
      const int n, const int nzmax) {
   if (n == _n && m == _m && nzmax == _nzmax) return;
   this->clear();
   _n=n;
   _m=m;
   _nzmax=nzmax;
   _externAlloc=false;
#pragma omp critical
   {
      _v = new T[nzmax];
      _r = new int[nzmax];
      _pB = new int[_n+1];
   }
   _pE = _pB+1;
   for (int i = 0; i<=_n; ++i) _pB[i]=0;
};

/// scale all non-zero values by a
/// NOTE(review): declared const but mutates the value array through the
/// cached pointer -- logical-constness choice inherited from this code base.
template <typename T>
inline void SpMatrix<T>::scal(const T a) const {
   cblas_scal<T>(_pB[_n],a,_v,1);
};

/// y <- alpha*A'*x + beta*y
template <typename T>
inline void SpMatrix<T>::multTrans(const Vector<T>& x, Vector<T>& y,
      const T alpha, const T beta) const {
   y.resize(_n);
   if (beta) {
      y.scal(beta);
   } else {
      y.setZeros();
   }
   const T* prX = x.rawX();
   for (int i = 0; i<_n; ++i) {
      T sum=T();
      for (int j = _pB[i]; j<_pE[i]; ++j) {
         sum+=_v[j]*prX[_r[j]];
      }
      y[i] += alpha*sum;
   }
};

/// perform y = alpha*A'*x + beta*y, when x is sparse
template <typename T>
inline void SpMatrix<T>::multTrans(const SpVector<T>& x, Vector<T>& y,
      const T alpha, const T beta) const {
   y.resize(_n);
   if (beta) {
      y.scal(beta);
   } else {
      y.setZeros();
   }
   T* prY = y.rawX();
   SpVector<T> col;
   for (int i = 0; i<_n; ++i) {
      this->refCol(i,col);
      prY[i] += alpha*x.dot(col);
   }
};

/// y <- alpha*A*x + beta*y
template <typename T>
inline void SpMatrix<T>::mult(const Vector<T>& x, Vector<T>& y,
      const T alpha, const T beta) const {
   y.resize(_m);
   if (beta) {
      y.scal(beta);
   } else {
      y.setZeros();
   }
   const T* prX = x.rawX();
   for (int i = 0; i<_n; ++i) {
      T sca=alpha* prX[i];
      for (int j = _pB[i]; j<_pE[i]; ++j) {
         y[_r[j]] += sca*_v[j];
      }
   }
};

/// perform b = alpha*A*x + beta*b, when x is sparse
template <typename T>
inline void SpMatrix<T>::mult(const SpVector<T>& x, Vector<T>& y,
      const T alpha, const T beta) const {
   y.resize(_m);
   if (beta) {
      y.scal(beta);
   } else {
      y.setZeros();
   }
   T* prY = y.rawX();
   for (int i = 0; i<x.L(); ++i) {
      int ind=x.r(i);
      T val = alpha * x.v(i);
      for (int j = _pB[ind]; j<_pE[ind]; ++j) {
         prY[_r[j]] += val *_v[j];
      }
   }
};

/// perform C = a*A*B + b*C, possibly transposing A or B.
template <typename T>
inline void SpMatrix<T>::mult(const Matrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB,
      const T a, const T b) const {
   if (transA) {
      if (transB) {
         C.resize(_n,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T> tmp;
         Vector<T> row(B.m());
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.mult(tmp,row);
            C.addRow(i,row,a);
         }
      } else {
         C.resize(_n,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T> tmp;
         Vector<T> row(B.n());
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.multTrans(tmp,row);
            C.addRow(i,row,a);
         }
      }
   } else {
      if (transB) {
         C.resize(_m,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> row(B.n());
         Vector<T> col;
         for (int i = 0; i<B.m(); ++i) {
            B.copyRow(i,row);
            C.refCol(i,col);
            this->mult(row,col,a,T(1.0));
         }
      } else {
         C.resize(_m,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> colB;
         Vector<T> colC;
         for (int i = 0; i<B.n(); ++i) {
            B.refCol(i,colB);
            C.refCol(i,colC);
            this->mult(colB,colC,a,T(1.0));
         }
      }
   }
};

/// perform C = a*A*B + b*C, possibly transposing A or B.
template <typename T>
inline void SpMatrix<T>::mult(const SpMatrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB,
      const T a, const T b) const {
   if (transA) {
      if (transB) {
         C.resize(_n,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T> tmp;
         Vector<T> row(B.m());
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.mult(tmp,row);
            C.addRow(i,row,a);
         }
      } else {
         C.resize(_n,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T> tmp;
         Vector<T> row(B.n());
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.multTrans(tmp,row);
            C.addRow(i,row,a);
         }
      }
   } else {
      if (transB) {
         C.resize(_m,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T> colB;
         SpVector<T> colA;
         // NOTE(review): iterates over _n columns of both A and B -- this
         // branch assumes B has (at least) as many columns as *this; confirm.
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,colA);
            B.refCol(i,colB);
            C.rank1Update(colA,colB,a);
         }
      } else {
         C.resize(_m,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         SpVector<T> colB;
         Vector<T> colC;
         for (int i = 0; i<B.n(); ++i) {
            B.refCol(i,colB);
            C.refCol(i,colC);
            this->mult(colB,colC,a);
         }
      }
   }
};

/// perform C = a*B*A + b*C, possibly transposing A or B.
template <typename T> inline void SpMatrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { B.mult(*this,C,transB,transA,a,b); }; template <typename T> inline T SpMatrix<T>::dot(const Matrix<T>& x) const { T sum=0; for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) { sum+=_v[j]*x(_r[j],j); } return sum; }; template <typename T> inline void SpMatrix<T>::copyRow(const int ind, Vector<T>& x) const { x.resize(_n); x.setZeros(); for (int i = 0; i<_n; ++i) { for (int j = _pB[i]; j<_pE[i]; ++j) { if (_r[j]==ind) { x[i]=_v[j]; } else if (_r[j] > ind) { break; } } } }; template <typename T> inline void SpMatrix<T>::addVecToCols( const Vector<T>& vec, const T a) { const T* pr_vec = vec.rawX(); if (isEqual(a,T(1.0))) { for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) _v[j] += pr_vec[_r[j]]; } else { for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) _v[j] += a*pr_vec[_r[j]]; } }; template <typename T> inline void SpMatrix<T>::addVecToColsWeighted( const Vector<T>& vec, const T* weights, const T a) { const T* pr_vec = vec.rawX(); if (isEqual(a,T(1.0))) { for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) _v[j] += pr_vec[_r[j]]*weights[j-_pB[i]]; } else { for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) _v[j] += a*pr_vec[_r[j]]*weights[j-_pB[i]]; } }; template <typename T> inline void SpMatrix<T>::sum_cols(Vector<T>& sum) const { sum.resize(_m); sum.setZeros(); SpVector<T> tmp; for (int i = 0; i<_n; ++i) { this->refCol(i,tmp); sum.add(tmp); } }; /// aat <- A*A' template <typename T> inline void SpMatrix<T>::AAt(Matrix<T>& aat) const { int i,j,k; int K=_m; int M=_n; /* compute alpha alpha^T */ aat.resize(K,K); int NUM_THREADS=init_omp(MAX_THREADS); T* aatT=new T[NUM_THREADS*K*K]; for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T(); #pragma omp parallel for private(i,j,k) for (i = 0; i<M; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int 
numT=0; #endif T* write_area=aatT+numT*K*K; for (j = _pB[i]; j<_pE[i]; ++j) { for (k = _pB[i]; k<=j; ++k) { write_area[_r[j]*K+_r[k]]+=_v[j]*_v[k]; } } } cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); } template <typename T> inline void SpMatrix<T>::XtX(Matrix<T>& XtX) const { XtX.resize(_n,_n); XtX.setZeros(); SpVector<T> col; Vector<T> col_out; for (int i = 0; i<_n; ++i) { this->refCol(i,col); XtX.refCol(i,col_out); this->multTrans(col,col_out); } }; /// aat <- A(:,indices)*A(:,indices)' template <typename T> inline void SpMatrix<T>::AAt(Matrix<T>& aat, const Vector<int>& indices) const { int i,j,k; int K=_m; int M=indices.n(); /* compute alpha alpha^T */ aat.resize(K,K); int NUM_THREADS=init_omp(MAX_THREADS); T* aatT=new T[NUM_THREADS*K*K]; for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T(); #pragma omp parallel for private(i,j,k) for (i = 0; i<M; ++i) { int ii = indices[i]; #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T* write_area=aatT+numT*K*K; for (j = _pB[ii]; j<_pE[ii]; ++j) { for (k = _pB[ii]; k<=j; ++k) { write_area[_r[j]*K+_r[k]]+=_v[j]*_v[k]; } } } cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); } /// aat <- sum_i w_i A(:,i)*A(:,i)' template <typename T> inline void SpMatrix<T>::wAAt(const Vector<T>& w, Matrix<T>& aat) const { int i,j,k; int K=_m; int M=_n; /* compute alpha alpha^T */ aat.resize(K,K); int NUM_THREADS=init_omp(MAX_THREADS); T* aatT=new T[NUM_THREADS*K*K]; for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T(); #pragma omp parallel for private(i,j,k) for (i = 0; i<M; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T* write_area=aatT+numT*K*K; for (j = _pB[i]; j<_pE[i]; ++j) { for (k = _pB[i]; k<=j; ++k) { write_area[_r[j]*K+_r[k]]+=w._X[i]*_v[j]*_v[k]; } } } 
cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); } /// XAt <- X*A' template <typename T> inline void SpMatrix<T>::XAt(const Matrix<T>& X, Matrix<T>& XAt) const { int j,i; int n=X._m; int K=_m; int M=_n; XAt.resize(n,K); /* compute X alpha^T */ int NUM_THREADS=init_omp(MAX_THREADS); T* XatT=new T[NUM_THREADS*n*K]; for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T(); #pragma omp parallel for private(i,j) for (i = 0; i<M; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T* write_area=XatT+numT*n*K; for (j = _pB[i]; j<_pE[i]; ++j) { cblas_axpy<T>(n,_v[j],X._X+i*n,1,write_area+_r[j]*n,1); } } cblas_copy<T>(n*K,XatT,1,XAt._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); delete[](XatT); }; /// XAt <- X(:,indices)*A(:,indices)' template <typename T> inline void SpMatrix<T>::XAt(const Matrix<T>& X, Matrix<T>& XAt, const Vector<int>& indices) const { int j,i; int n=X._m; int K=_m; int M=indices.n(); XAt.resize(n,K); /* compute X alpha^T */ int NUM_THREADS=init_omp(MAX_THREADS); T* XatT=new T[NUM_THREADS*n*K]; for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T(); #pragma omp parallel for private(i,j) for (i = 0; i<M; ++i) { int ii = indices[i]; #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T* write_area=XatT+numT*n*K; for (j = _pB[ii]; j<_pE[ii]; ++j) { cblas_axpy<T>(n,_v[j],X._X+i*n,1,write_area+_r[j]*n,1); } } cblas_copy<T>(n*K,XatT,1,XAt._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); delete[](XatT); }; /// XAt <- sum_i w_i X(:,i)*A(:,i)' template <typename T> inline void SpMatrix<T>::wXAt(const Vector<T>& w, const Matrix<T>& X, Matrix<T>& XAt, const int numThreads) const { int j,l,i; int n=X._m; int K=_m; int M=_n; int Mx = X._n; int numRepX= M/Mx; assert(numRepX*Mx == M); XAt.resize(n,K); /* compute X alpha^T */ int NUM_THREADS=init_omp(numThreads); T* 
XatT=new T[NUM_THREADS*n*K]; for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T(); #pragma omp parallel for private(i,j,l) for (i = 0; i<Mx; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif T * write_area=XatT+numT*n*K; for (l = 0; l<numRepX; ++l) { int ind=numRepX*i+l; if (w._X[ind] != 0) for (j = _pB[ind]; j<_pE[ind]; ++j) { cblas_axpy<T>(n,w._X[ind]*_v[j],X._X+i*n,1,write_area+_r[j]*n,1); } } } cblas_copy<T>(n*K,XatT,1,XAt._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); delete[](XatT); }; /// copy the sparse matrix into a dense matrix template<typename T> inline void SpMatrix<T>::toFull(Matrix<T>& matrix) const { matrix.resize(_m,_n); matrix.setZeros(); T* out = matrix._X; for (int i=0; i<_n; ++i) { for (int j = _pB[i]; j<_pE[i]; ++j) { out[i*_m+_r[j]]=_v[j]; } } }; /// copy the sparse matrix into a full dense matrix template <typename T> inline void SpMatrix<T>::toFullTrans( Matrix<T>& matrix) const { matrix.resize(_n,_m); matrix.setZeros(); T* out = matrix._X; for (int i=0; i<_n; ++i) { for (int j = _pB[i]; j<_pE[i]; ++j) { out[i+_r[j]*_n]=_v[j]; } } }; /// use the data from v, r for _v, _r template <typename T> inline void SpMatrix<T>::convert(const Matrix<T>&vM, const Matrix<int>& rM, const int K) { const int M = rM.n(); const int L = rM.m(); const int* r = rM.X(); const T* v = vM.X(); int count=0; for (int i = 0; i<M*L; ++i) if (r[i] != -1) ++count; resize(K,M,count); count=0; for (int i = 0; i<M; ++i) { _pB[i]=count; for (int j = 0; j<L; ++j) { if (r[i*L+j] == -1) break; _v[count]=v[i*L+j]; _r[count++]=r[i*L+j]; } _pE[i]=count; } for (int i = 0; i<M; ++i) sort(_r,_v,_pB[i],_pE[i]-1); }; /// use the data from v, r for _v, _r template <typename T> inline void SpMatrix<T>::convert2( const Matrix<T>&vM, const Vector<int>& rv, const int K) { const int M = vM.n(); const int L = vM.m(); int* r = rv.rawX(); const T* v = vM.X(); int LL=0; for (int i = 0; i<L; ++i) if (r[i] != -1) ++LL; 
this->resize(K,M,LL*M); int count=0; for (int i = 0; i<M; ++i) { _pB[i]=count; for (int j = 0; j<LL; ++j) { _v[count]=v[i*L+j]; _r[count++]=r[j]; } _pE[i]=count; } for (int i = 0; i<M; ++i) sort(_r,_v,_pB[i],_pE[i]-1); }; /// returns the l2 norms ^2 of the columns template <typename T> inline void SpMatrix<T>::norm_2sq_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] = col.nrm2sq(); } }; template <typename T> inline void SpMatrix<T>::norm_0_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] = static_cast<T>(col.length()); } }; template <typename T> inline void SpMatrix<T>::norm_1_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] =col.asum(); } }; /* *************************** * Implementation of SpVector * ***************************/ /// Constructor, of the sparse vector of size L. 
// Wrapping constructor: references caller-owned storage (v, r); nothing is
// freed on destruction (_externAlloc=true).
template <typename T> SpVector<T>::SpVector(T* v, int* r, int L, int nzmax) : _externAlloc(true), _v(v), _r(r), _L(L), _nzmax(nzmax) { };
/// Constructor, allocates nzmax slots
template <typename T> SpVector<T>::SpVector(int nzmax) : _externAlloc(false), _L(0), _nzmax(nzmax) {
#pragma omp critical
{ _v = new T[nzmax]; _r = new int[nzmax]; } };
/// Empty constructor
template <typename T> SpVector<T>::SpVector() : _externAlloc(true), _v(NULL), _r(NULL), _L(0), _nzmax(0) { };
/// Destructor
template <typename T> SpVector<T>::~SpVector() { clear(); };
/// computes the sum of the magnitude of the elements
template <typename T> inline T SpVector<T>::asum() const { return cblas_asum<T>(_L,_v,1); };
/// computes the l2 norm ^2 of the vector
template <typename T> inline T SpVector<T>::nrm2sq() const { return cblas_dot<T>(_L,_v,1,_v,1); };
/// computes the l2 norm of the vector
template <typename T> inline T SpVector<T>::nrm2() const { return cblas_nrm2<T>(_L,_v,1); };
/// largest-magnitude stored value (original comment said "l2 norm")
template <typename T> inline T SpVector<T>::fmaxval() const { Vector<T> tmp(_v,_L); return tmp.fmaxval(); };
/// print the vector to std::cerr
template <typename T> inline void SpVector<T>::print(const string& name) const { std::cerr << name << std::endl; std::cerr << _nzmax << std::endl; for (int i = 0; i<_L; ++i) cerr << "(" <<_r[i] << ", " << _v[i] << ")" << endl; };
/// create a reference on the index array _r (no copy)
template <typename T> inline void SpVector<T>::refIndices( Vector<int>& indices) const { indices.setPointer(_r,_L); };
/// creates a reference on the value array _v (no copy)
template <typename T> inline void SpVector<T>::refVal( Vector<T>& val) const { val.setPointer(_v,_L); };
/// a <- a.^2 (element-wise square of the stored values)
template <typename T> inline void SpVector<T>::sqr() { vSqr<T>(_L,_v,_v); };
/// sparse-sparse dot product: two-pointer merge over the (sorted) index
/// arrays of both vectors
template <typename T> inline T SpVector<T>::dot(const SpVector<T>& vec) const { T sum=T(); int countI = 0; int countJ = 0; while (countI < _L && countJ < vec._L) { const int rI = _r[countI]; const int rJ = vec._r[countJ]; if (rI > rJ) { ++countJ; } else if (rJ > rI) { ++countI; } else { sum+=_v[countI]*vec._v[countJ]; ++countI; ++countJ; } } return sum; };
/// clears the vector (frees owned storage only)
template <typename T> inline void SpVector<T>::clear() { if (!_externAlloc) { delete[](_v); delete[](_r); } _v=NULL; _r=NULL; _L=0; _nzmax=0; _externAlloc=true; };
/// resizes the vector's capacity; contents are discarded when the capacity
/// actually changes
template <typename T> inline void SpVector<T>::resize(const int nzmax) { if (_nzmax != nzmax) { clear(); _nzmax=nzmax; _L=0; _externAlloc=false;
#pragma omp critical
{ _v=new T[nzmax]; _r=new int[nzmax]; } } };
/// reinterpret the flat indices (column-major over an m x n grid) as a sparse
/// m x n matrix; assumes _r is sorted.  The `i--` replays the current entry
/// after advancing the column pointer.  (out_v is assigned but unused.)
template <typename T> void inline SpVector<T>::toSpMatrix( SpMatrix<T>& out, const int m, const int n) const { out.resize(m,n,_L); cblas_copy<T>(_L,_v,1,out._v,1); int current_col=0; T* out_v=out._v; int* out_r=out._r; int* out_pB=out._pB; out_pB[0]=current_col; for (int i = 0; i<_L; ++i) { int col=_r[i]/m; if (col > current_col) { out_pB[current_col+1]=i; current_col++; i--; } else { out_r[i]=_r[i]-col*m; } } for (current_col++ ; current_col < n+1; ++current_col) out_pB[current_col]=_L; };
/// scatter into a dense vector (out must already have the right size)
template <typename T> void inline SpVector<T>::toFull(Vector<T>& out) const { out.setZeros(); T* X = out.rawX(); for (int i = 0; i<_L; ++i) X[_r[i]]=_v[i]; };
/* ****************************
 * Implementaton of ProdMatrix
 * ****************************/
template <typename T> ProdMatrix<T>::ProdMatrix() { _DtX= NULL; _X=NULL; _D=NULL; _high_memory=true; _n=0; _m=0; _addDiag=0; };
/// Constructor. Matrix D'*D is represented; materialized only in high-memory
/// mode.
template <typename T> ProdMatrix<T>::ProdMatrix(const Matrix<T>& D, const bool high_memory) { if (high_memory) _DtX = new Matrix<T>(); this->setMatrices(D,high_memory); };
/// Constructor.
// Matrix D'*X is represented; materialized only in high-memory mode.
template <typename T> ProdMatrix<T>::ProdMatrix(const Matrix<T>& D, const Matrix<T>& X, const bool high_memory) { if (high_memory) _DtX = new Matrix<T>(); this->setMatrices(D,X,high_memory); };
// high-memory: precompute D'*X; otherwise keep references and compute
// entries/columns on demand
template <typename T> inline void ProdMatrix<T>::setMatrices(const Matrix<T>& D, const Matrix<T>& X, const bool high_memory) { _high_memory=high_memory; _m = D.n(); _n = X.n(); if (high_memory) { D.mult(X,*_DtX,true,false); } else { _X=&X; _D=&D; _DtX=NULL; } _addDiag=0; };
// Gram-matrix variant: represents D'*D
template <typename T> inline void ProdMatrix<T>::setMatrices( const Matrix<T>& D, const bool high_memory) { _high_memory=high_memory; _m = D.n(); _n = D.n(); if (high_memory) { D.XtX(*_DtX); } else { _X=&D; _D=&D; _DtX=NULL; } _addDiag=0; };
/// compute DtX(:,i)
template <typename T> inline void ProdMatrix<T>::copyCol(const int i, Vector<T>& DtXi) const { if (_high_memory) { _DtX->copyCol(i,DtXi); } else { Vector<T> Xi; _X->refCol(i,Xi); _D->multTrans(Xi,DtXi); if (_addDiag && _m == _n) DtXi[i] += _addDiag; } };
/// compute DtX(:,i) into a raw buffer
template <typename T> inline void ProdMatrix<T>::extract_rawCol(const int i,T* DtXi) const { if (_high_memory) { _DtX->extract_rawCol(i,DtXi); } else { Vector<T> Xi; Vector<T> vDtXi(DtXi,_m); _X->refCol(i,Xi); _D->multTrans(Xi,vDtXi); if (_addDiag && _m == _n) DtXi[i] += _addDiag; } };
/// DtXi += a*DtX(:,i)
template <typename T> inline void ProdMatrix<T>::add_rawCol(const int i,T* DtXi, const T a) const { if (_high_memory) { _DtX->add_rawCol(i,DtXi,a); } else { Vector<T> Xi; Vector<T> vDtXi(DtXi,_m); _X->refCol(i,Xi); _D->multTrans(Xi,vDtXi,a,T(1.0)); if (_addDiag && _m == _n) DtXi[i] += a*_addDiag; } };
/// add diag to the diagonal (square case only); in low-memory mode it is
/// stored and applied lazily
template <typename T> void inline ProdMatrix<T>::addDiag(const T diag) { if (_m == _n) { if (_high_memory) { _DtX->addDiag(diag); } else { _addDiag=diag; } } };
/// flat (column-major) element access; low-memory mode recomputes the entry
/// as a dot product.  NOTE(review): low-memory path does not apply _addDiag
/// — presumably callers only use it before addDiag (TODO confirm).
template <typename T> inline T ProdMatrix<T>::operator[](const int index) const { if (_high_memory) { return (*_DtX)[index]; } else { const int index2=index/this->_m; const int index1=index-this->_m*index2; Vector<T> col1, col2; _D->refCol(index1,col1); _X->refCol(index2,col2); return col1.dot(col2); } };
/// (row,col) element access; same remark about _addDiag as operator[]
template <typename T> inline T ProdMatrix<T>::operator()(const int index1, const int index2) const { if (_high_memory) { return (*_DtX)(index1,index2); } else { Vector<T> col1, col2; _D->refCol(index1,col1); _X->refCol(index2,col2); return col1.dot(col2); } };
/// diag <- diagonal of D'*X (square case only)
template <typename T> void inline ProdMatrix<T>::diag(Vector<T>& diag) const { if (_m == _n) { if (_high_memory) { _DtX->diag(diag); } else { Vector<T> col1, col2; for (int i = 0; i <_m; ++i) { _D->refCol(i,col1); _X->refCol(i,col2); diag[i] = col1.dot(col2); } } } };
/// View of G restricted to rows indI and columns indJ (indices are copied;
/// G is referenced, not owned).
template <typename T> class SubMatrix : public AbstractMatrix<T> {
   public:
      SubMatrix(AbstractMatrix<T>& G, Vector<int>& indI, Vector<int>& indJ);
      void inline convertIndicesI(Vector<int>& ind) const;
      void inline convertIndicesJ(Vector<int>& ind) const;
      int inline n() const { return _indicesJ.n(); };
      int inline m() const { return _indicesI.n(); };
      void inline extract_rawCol(const int i, T* pr) const;
      /// compute DtX(:,i)
      inline void copyCol(const int i, Vector<T>& DtXi) const;
      /// compute DtX(:,i)
      inline void add_rawCol(const int i, T* DtXi, const T a) const;
      /// compute DtX(:,i)
      inline void diag(Vector<T>& diag) const;
      inline T operator()(const int index1, const int index2) const;
   private:
      Vector<int> _indicesI;
      Vector<int> _indicesJ;
      AbstractMatrix<T>* _matrix;
};
template <typename T> SubMatrix<T>::SubMatrix(AbstractMatrix<T>& G, Vector<int>& indI, Vector<int>& indJ) { _matrix = &G; _indicesI.copy(indI); _indicesJ.copy(indJ); };
// map local row indices back to the parent matrix (stops at -1 sentinel)
template <typename T> void inline SubMatrix<T>::convertIndicesI( Vector<int>& ind) const { int* pr_ind = ind.rawX(); for (int i = 0; i<ind.n(); ++i) { if (pr_ind[i] == -1) break; pr_ind[i]=_indicesI[pr_ind[i]]; } };
// map local column indices back to the parent matrix (stops at -1 sentinel)
template <typename T> void inline SubMatrix<T>::convertIndicesJ( Vector<int>& ind) const { int* pr_ind = ind.rawX(); for (int i = 0; i<ind.n(); ++i) { if (pr_ind[i] == -1) break; pr_ind[i]=_indicesJ[pr_ind[i]]; } };
// gather local column i from the parent matrix entry by entry
template <typename T> void inline SubMatrix<T>::extract_rawCol(const int i, T* pr) const { int* pr_ind=_indicesI.rawX(); int* pr_ind2=_indicesJ.rawX(); for (int j = 0; j<_indicesI.n(); ++j) { pr[j]=(*_matrix)(pr_ind[j],pr_ind2[i]); } };
template <typename T> inline void SubMatrix<T>::copyCol(const int i, Vector<T>& DtXi) const { this->extract_rawCol(i,DtXi.rawX()); };
template <typename T> void inline SubMatrix<T>::add_rawCol(const int i, T* pr, const T a) const { int* pr_ind=_indicesI.rawX(); int* pr_ind2=_indicesJ.rawX(); for (int j = 0; j<_indicesI.n(); ++j) { pr[j]+=a*(*_matrix)(pr_ind[j],pr_ind2[i]); } };
// diagonal of the restricted matrix, taken along the row-index set
template <typename T> void inline SubMatrix<T>::diag(Vector<T>& diag) const { T* pr = diag.rawX(); int* pr_ind=_indicesI.rawX(); for (int j = 0; j<_indicesI.n(); ++j) { pr[j]=(*_matrix)(pr_ind[j],pr_ind[j]); } };
template <typename T> inline T SubMatrix<T>::operator()(const int index1, const int index2) const { return (*_matrix)(_indicesI[index1],_indicesJ[index2]); }
/// Matrix with shifts
template <typename T> class ShiftMatrix : public AbstractMatrixB<T> {
   public:
      ShiftMatrix(const AbstractMatrixB<T>& inputmatrix, const int shifts, const bool center = false) : _shifts(shifts), _inputmatrix(&inputmatrix), _centered(false) { _m=_inputmatrix->m()-shifts+1; _n=_inputmatrix->n()*shifts; if (center) this->center(); };
      int n() const { return _n; };
      int m() const { return _m; };
      /// b <- alpha A'x + beta b
      void multTrans(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
      /// perform b = alpha*A*x + beta*b, when x is sparse
      virtual void mult(const SpVector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
      virtual void mult(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
      /// perform C = a*A*B + b*C, possibly transposing A or B.
      virtual void mult(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
      virtual void mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
      /// perform C = a*B*A + b*C, possibly transposing A or B.
      virtual void multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
      /// XtX = A'*A
      virtual void XtX(Matrix<T>& XtX) const;
      virtual void copyRow(const int i, Vector<T>& x) const;
      virtual void copyTo(Matrix<T>& copy) const;
      virtual T dot(const Matrix<T>& x) const;
      virtual void print(const string& name) const;
      virtual ~ShiftMatrix() { };
   private:
      // cache the per-column means (via multTrans with a constant vector)
      void center() { Vector<T> ones(_m); ones.set(T(1.0)/_m); this->multTrans(ones,_means); _centered=true; };
      int _m;
      int _n;
      int _shifts;
      bool _centered;
      Vector<T> _means;
      const AbstractMatrixB<T>* _inputmatrix;
};
// b <- alpha*A'*x + beta*b: for each shift i, embed x at offset i of a
// zero-padded temporary and apply the underlying matrix; output slices of b
// (length n per shift) do not overlap
template <typename T> void ShiftMatrix<T>::multTrans(const Vector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_n); if (beta==0) b.setZeros(); Vector<T> tmp(_inputmatrix->m()); Vector<T> subvec; Vector<T> subvec2; const int nn=_inputmatrix->n(); for (int i = 0; i<_shifts; ++i) { tmp.setZeros(); subvec2.setData(tmp.rawX()+i,_m); subvec2.copy(x); subvec.setData(b.rawX()+i*nn,nn); _inputmatrix->multTrans(tmp,subvec,alpha,beta); } if (_centered) { b.add(_means,-alpha*x.sum()); } };
/// perform b = alpha*A*x + beta*b, when x is sparse: densify x, then apply
/// the underlying matrix shift by shift, re-sparsifying each slice
template <typename T> void ShiftMatrix<T>::mult(const SpVector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_m); if (beta==0) { b.setZeros(); } else { b.scal(beta); } const int nn=_inputmatrix->n(); const int mm=_inputmatrix->m(); Vector<T> fullx(_n); x.toFull(fullx); SpVector<T> sptmp(nn); Vector<T> tmp; Vector<T> tmp2(mm); for (int i = 0; i<_shifts; ++i) { tmp.setData(fullx.rawX()+i*nn,nn); tmp.toSparse(sptmp); _inputmatrix->mult(sptmp,tmp2,alpha,0); tmp.setData(tmp2.rawX()+i,_m); b.add(tmp); } if (_centered) { b.add(-alpha*_means.dot(x)); } };
/// b = alpha*A*x + beta*b (dense x), same shift-by-shift accumulation
template <typename T> void ShiftMatrix<T>::mult(const Vector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_m); const int nn=_inputmatrix->n(); const int mm=_inputmatrix->m(); Vector<T> tmp; Vector<T> tmp2(mm); if (beta==0) { b.setZeros(); } else { b.scal(beta); } for (int i = 0; i<_shifts; ++i) { tmp.setData(x.rawX()+i*nn,nn); _inputmatrix->mult(tmp,tmp2,alpha,0); tmp.setData(tmp2.rawX()+i,_m); b.add(tmp); } if (_centered) { b.add(-alpha*_means.dot(x)); } };
/// unsupported operation: warns and leaves C untouched
template <typename T> void ShiftMatrix<T>::mult(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { cerr << "Shift Matrix is used in inadequate setting" << endl; }
/// unsupported operation: warns and leaves C untouched
template <typename T> void ShiftMatrix<T>::mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { cerr << "Shift Matrix is used in inadequate setting" << endl; }
/// perform C = a*B*A + b*C, possibly transposing A or B.
/// unsupported operation: warns and leaves C untouched
template <typename T> void ShiftMatrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { cerr << "Shift Matrix is used in inadequate setting" << endl; }
/// unsupported operation: warns and leaves XtX untouched
template <typename T> void ShiftMatrix<T>::XtX(Matrix<T>& XtX) const { cerr << "Shift Matrix is used in inadequate setting" << endl; };
// copy row `ind`: concatenation of rows ind..ind+_shifts-1 of the underlying
// matrix.  NOTE(review): each slice has length m() of the input matrix while
// the input rows have n() entries — looks asymmetric with multTrans; TODO
// confirm x's expected layout at the call sites.
template <typename T> void ShiftMatrix<T>::copyRow(const int ind, Vector<T>& x) const { Vector<T> sub_vec; const int mm=_inputmatrix->m(); for (int i = 0; i<_shifts; ++i) { sub_vec.setData(x.rawX()+i*mm,mm); _inputmatrix->copyRow(ind+i,sub_vec); } if (_centered) x.sub(_means); };
/// unsupported operation: warns and leaves x untouched
template <typename T> void ShiftMatrix<T>::copyTo(Matrix<T>& x) const { cerr << "Shift Matrix is used in inadequate setting" << endl; };
/// unsupported operation: warns and returns 0
template <typename T> T ShiftMatrix<T>::dot(const Matrix<T>& x) const { cerr << "Shift Matrix is used in inadequate setting" << endl; return 0; };
template <typename T> void ShiftMatrix<T>::print(const string& name) const { cerr << name << endl; cerr << "Shift Matrix: " << _shifts << " shifts" << endl; _inputmatrix->print(name); };
/// View of a matrix with every row duplicated (m = 2*input rows); columns are
/// unchanged.  The input matrix is referenced, not owned.
template <typename T> class DoubleRowMatrix : public AbstractMatrixB<T> {
   public:
      DoubleRowMatrix(const AbstractMatrixB<T>& inputmatrix) : _inputmatrix(&inputmatrix) { _n=inputmatrix.n(); _m=2*inputmatrix.m(); };
      int n() const { return _n; };
      int m() const { return _m; };
      /// b <- alpha A'x + beta b
      void multTrans(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
      /// perform b = alpha*A*x + beta*b, when x is sparse
      virtual void mult(const SpVector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
      virtual void mult(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const;
      /// perform C = a*A*B + b*C, possibly transposing A or B.
      virtual void mult(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
      virtual void mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
      /// perform C = a*B*A + b*C, possibly transposing A or B.
      virtual void multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const;
      /// XtX = A'*A
      virtual void XtX(Matrix<T>& XtX) const;
      virtual void copyRow(const int i, Vector<T>& x) const;
      virtual void copyTo(Matrix<T>& copy) const;
      virtual T dot(const Matrix<T>& x) const;
      virtual void print(const string& name) const;
      virtual ~DoubleRowMatrix() { };
   private:
      int _m;
      int _n;
      const AbstractMatrixB<T>* _inputmatrix;
};
// A'x with duplicated rows: pairs (2i, 2i+1) of x are summed before applying
// the underlying matrix
template <typename T> void DoubleRowMatrix<T>::multTrans(const Vector<T>& x, Vector<T>& b, const T alpha, const T beta) const { const int mm = _inputmatrix->m(); Vector<T> tmp(mm); for (int i = 0; i<mm; ++i) tmp[i]=x[2*i]+x[2*i+1]; _inputmatrix->multTrans(tmp,b,alpha,beta); };
/// perform b = alpha*A*x + beta*b, when x is sparse: each underlying output
/// entry is written to both duplicated rows
template <typename T> void DoubleRowMatrix<T>::mult(const SpVector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_m); if (beta==0) { b.setZeros(); } else { b.scal(beta); } const int mm = _inputmatrix->m(); Vector<T> tmp(mm); _inputmatrix->mult(x,tmp,alpha); for (int i = 0; i<mm; ++i) { b[2*i]+=tmp[i]; b[2*i+1]+=tmp[i]; } };
/// dense-x variant of the above
template <typename T> void DoubleRowMatrix<T>::mult(const Vector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_m); if (beta==0) { b.setZeros(); } else { b.scal(beta); } const int mm = _inputmatrix->m(); Vector<T> tmp(mm); _inputmatrix->mult(x,tmp,alpha); for (int i = 0; i<mm; ++i) { b[2*i]+=tmp[i]; b[2*i+1]+=tmp[i]; } };
// FLAG(n) below: presumably a debug/trace macro defined elsewhere in the
// project — TODO confirm; the stubs warn and do nothing.
template <typename T> void DoubleRowMatrix<T>::mult(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { FLAG(5) cerr << "Double Matrix is used in inadequate setting" << endl; }
template <typename T> void DoubleRowMatrix<T>::mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { FLAG(4) cerr << "Double Matrix is used in inadequate setting" << endl; }
/// perform C = a*B*A + b*C, possibly transposing A or B.
template <typename T> void DoubleRowMatrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { FLAG(3) cerr << "Double Matrix is used in inadequate setting" << endl; }
template <typename T> void DoubleRowMatrix<T>::XtX(Matrix<T>& XtX) const { FLAG(2) cerr << "Double Matrix is used in inadequate setting" << endl; };
// row ind of the doubled matrix equals row floor(ind/2) of the input
template <typename T> void DoubleRowMatrix<T>::copyRow(const int ind, Vector<T>& x) const { const int indd2= static_cast<int>(floor(static_cast<double>(ind)/2.0)); _inputmatrix->copyRow(indd2,x); };
template <typename T> void DoubleRowMatrix<T>::copyTo(Matrix<T>& x) const { FLAG(1) cerr << "Double Matrix is used in inadequate setting" << endl; };
template <typename T> T DoubleRowMatrix<T>::dot(const Matrix<T>& x) const { FLAG(0) cerr << "Double Matrix is used in inadequate setting" << endl; return 0; };
template <typename T> void DoubleRowMatrix<T>::print(const string& name) const { cerr << name << endl; cerr << "Double Row Matrix" << endl; _inputmatrix->print(name); };
#endif
fmm_scale_invariant.h
#ifndef fmm_scale_invariant_h #define fmm_scale_invariant_h #include <cstring> // std::memset #include <fstream> // std::ofstream #include <type_traits> // std::is_same #include "fmm_base.h" #include "intrinsics.h" #include "math_wrapper.h" namespace exafmm_t { template <typename T> class FmmScaleInvariant : public FmmBase<T> { /** For the variables from base class that do not template parameter T, * we need to use this-> to tell compilers to lookup nondependent names * in the base class. Eg. p, nsurf, r0, kernel_matrix etc. * https://isocpp.org/wiki/faq/templates#nondependent-name-lookup-members/ */ public: /* precomputation matrices */ std::vector<T> matrix_UC2E_U; //!< First component of the pseudo-inverse of upward check to upward equivalent kernel matrix. std::vector<T> matrix_UC2E_V; //!< Second component of the pseudo-inverse of upward check to upward equivalent kernel matrix. std::vector<T> matrix_DC2E_U; //!< First component of the pseudo-inverse of downward check to downward equivalent kernel matrix. std::vector<T> matrix_DC2E_V; //!< Second component of the pseudo-inverse of downward check to downward equivalent kernel matrix. std::vector<std::vector<T>> matrix_M2M; //!< The pseudo-inverse of M2M kernel matrix. std::vector<std::vector<T>> matrix_L2L; //!< The pseudo-inverse of L2L kernel matrix. std::vector<AlignedVec> matrix_M2L; //!< The pseudo-inverse of M2L kernel matrix. M2LData m2ldata; /* constructors */ FmmScaleInvariant() {} FmmScaleInvariant(int p_, int ncrit_, std::string filename_=std::string()) : FmmBase<T>(p_, ncrit_, filename_) {} /* precomputation */ //! 
//! Setup the sizes of precomputation matrices: UC2E/DC2E pseudo-inverse
//! factors, per-relative-position M2M/L2L matrices, and frequency-domain
//! M2L matrices.
  void initialize_matrix() {
    size_t size = this->nfreq * 2 * NCHILD * NCHILD;  // size of each M2L precomputation matrix
    int& nsurf_ = this->nsurf;
    matrix_UC2E_U.resize(nsurf_*nsurf_);
    matrix_UC2E_V.resize(nsurf_*nsurf_);
    matrix_DC2E_U.resize(nsurf_*nsurf_);
    matrix_DC2E_V.resize(nsurf_*nsurf_);
    matrix_M2M.resize(REL_COORD[M2M_Type].size(), std::vector<T>(nsurf_*nsurf_));
    matrix_L2L.resize(REL_COORD[L2L_Type].size(), std::vector<T>(nsurf_*nsurf_));
    matrix_M2L.resize(REL_COORD[M2L_Type].size(), AlignedVec(size));
  }

  //! Precompute M2M and L2L translation matrices, one per relative
  //! parent/child position. Scale invariance means level-0 matrices suffice;
  //! other levels reuse them with a scale factor applied at apply time.
  void precompute_M2M() {
    int& nsurf_ = this->nsurf;
    int npos = REL_COORD[M2M_Type].size();  // number of relative positions
    int level = 0;
    real_t parent_coord[3] = {0, 0, 0};
    RealVec parent_up_check_surf = surface(this->p, this->r0, level, parent_coord, 2.95);
    real_t s = this->r0 * powf(0.5, level+1);  // child half-size at level+1
#pragma omp parallel for
    for (int i=0; i<npos; i++) {
      // kernel matrix from the parent's upward-check surface to the
      // i-th child position's upward-equivalent surface
      ivec3& coord = REL_COORD[M2M_Type][i];
      real_t child_coord[3] = {parent_coord[0] + coord[0]*s,
                               parent_coord[1] + coord[1]*s,
                               parent_coord[2] + coord[2]*s};
      RealVec child_up_equiv_surf = surface(this->p, this->r0, level+1, child_coord, 1.05);
      std::vector<T> matrix_pc2ce(nsurf_*nsurf_);
      this->kernel_matrix(parent_up_check_surf, child_up_equiv_surf, matrix_pc2ce);
      // M2M: apply the UC2E pseudo-inverse (U then V factor) to the kernel matrix
      std::vector<T> buffer(nsurf_*nsurf_);
      gemm(nsurf_, nsurf_, nsurf_, &matrix_UC2E_U[0], &matrix_pc2ce[0], &buffer[0]);
      gemm(nsurf_, nsurf_, nsurf_, &matrix_UC2E_V[0], &buffer[0], &(matrix_M2M[i][0]));
      // L2L: transpose of the kernel matrix combined with the DC2E factors
      matrix_pc2ce = transpose(matrix_pc2ce, nsurf_, nsurf_);
      gemm(nsurf_, nsurf_, nsurf_, &matrix_pc2ce[0], &matrix_DC2E_V[0], &buffer[0]);
      gemm(nsurf_, nsurf_, nsurf_, &buffer[0], &matrix_DC2E_U[0], &(matrix_L2L[i][0]));
    }
  }

  //! Precompute UC2UE and DC2DE matrices. No-op in the generic template;
  //! specialized for real_t and complex_t later in this file.
  void precompute_check2equiv() {}

  //! Precompute M2L. No-op in the generic template; specialized for real_t
  //! later in this file.
  void precompute_M2L() {}

  //!
Save precomputation matrices to this->filename in a fixed binary order:
//! r0, then UC2E/DC2E factors, then all M2M and L2L matrices, then all
//! frequency-domain M2L matrices. load_matrix() reads the same layout.
  void save_matrix() {
    std::remove(this->filename.c_str());
    std::ofstream file(this->filename, std::ofstream::binary);
    // r0
    file.write(reinterpret_cast<char*>(&this->r0), sizeof(real_t));
    size_t size = this->nsurf * this->nsurf;
    // UC2E, DC2E
    file.write(reinterpret_cast<char*>(&matrix_UC2E_U[0]), size*sizeof(T));
    file.write(reinterpret_cast<char*>(&matrix_UC2E_V[0]), size*sizeof(T));
    file.write(reinterpret_cast<char*>(&matrix_DC2E_U[0]), size*sizeof(T));
    file.write(reinterpret_cast<char*>(&matrix_DC2E_V[0]), size*sizeof(T));
    // M2M, L2L
    for (auto & vec : matrix_M2M) {
      file.write(reinterpret_cast<char*>(&vec[0]), size*sizeof(T));
    }
    for (auto & vec : matrix_L2L) {
      file.write(reinterpret_cast<char*>(&vec[0]), size*sizeof(T));
    }
    // M2L
    size = this->nfreq * 2 * NCHILD * NCHILD;
    for (auto & vec : matrix_M2L) {
      file.write(reinterpret_cast<char*>(&vec[0]), size*sizeof(real_t));
    }
    file.close();
  }

  //! Check and load precomputation matrices. Only loads when the cache file
  //! exists, its total byte size matches the expected layout, AND its stored
  //! r0 equals the current tree radius; otherwise is_precomputed stays false
  //! and precompute() recomputes everything.
  void load_matrix() {
    size_t size_M2L = this->nfreq * 2 * NCHILD * NCHILD;
    size_t file_size = (2*REL_COORD[M2M_Type].size()+4) * this->nsurf * this->nsurf * sizeof(T)
                     +    REL_COORD[M2L_Type].size() * size_M2L * sizeof(real_t)
                     +    1 * sizeof(real_t);   // +1 denotes r0
    std::ifstream file(this->filename, std::ifstream::binary);
    if (file.good()) {
      file.seekg(0, file.end);
      if (size_t(file.tellg()) == file_size) {   // if file size is correct
        file.seekg(0, file.beg);  // move the position back to the beginning
        real_t r0_;
        file.read(reinterpret_cast<char*>(&r0_), sizeof(real_t));
        if (this->r0 == r0_) {    // if radius match
          size_t size = this->nsurf * this->nsurf;
          // UC2E, DC2E
          file.read(reinterpret_cast<char*>(&matrix_UC2E_U[0]), size*sizeof(T));
          file.read(reinterpret_cast<char*>(&matrix_UC2E_V[0]), size*sizeof(T));
          file.read(reinterpret_cast<char*>(&matrix_DC2E_U[0]), size*sizeof(T));
          file.read(reinterpret_cast<char*>(&matrix_DC2E_V[0]), size*sizeof(T));
          // M2M, L2L
          for (auto & vec : matrix_M2M) {
            file.read(reinterpret_cast<char*>(&vec[0]), size*sizeof(T));
          }
          for (auto & vec : matrix_L2L) {
            file.read(reinterpret_cast<char*>(&vec[0]), size*sizeof(T));
          }
          // M2L
          for (auto & vec : matrix_M2L) {
            file.read(reinterpret_cast<char*>(&vec[0]), size_M2L*sizeof(real_t));
          }
          this->is_precomputed = true;
        }
      }
    }
    file.close();
  }

  //! Precompute: allocate, try the on-disk cache, and on a cache miss
  //! compute everything and save it for the next run.
  void precompute() {
    initialize_matrix();
    load_matrix();
    if (!this->is_precomputed) {
      precompute_check2equiv();
      precompute_M2M();
      precompute_M2L();
      save_matrix();
    }
  }

  //! P2M operator: for every leaf, evaluate the upward-check potential from
  //! its sources, then convert it to upward-equivalent charges via the
  //! UC2E pseudo-inverse, scaled by 2^-level (scale invariance).
  void P2M(NodePtrs<T>& leafs) {
    int& nsurf_ = this->nsurf;
    real_t c[3] = {0,0,0};
    std::vector<RealVec> up_check_surf;
    up_check_surf.resize(this->depth+1);
    for (int level=0; level<=this->depth; level++) {
      // NOTE(review): the resize is redundant -- the assignment on the next
      // line replaces the vector wholesale.
      up_check_surf[level].resize(nsurf_*3);
      up_check_surf[level] = surface(this->p, this->r0, level, c, 2.95);
    }
#pragma omp parallel for
    for (size_t i=0; i<leafs.size(); i++) {
      Node<T>* leaf = leafs[i];
      int level = leaf->level;
      real_t scale = pow(0.5, level);  // scaling factor of UC2UE precomputation matrix
      // calculate upward check potential induced by sources' charges;
      // the result is stored temporarily in leaf->up_equiv
      RealVec check_coord(nsurf_*3);
      for (int k=0; k<nsurf_; k++) {
        check_coord[3*k+0] = up_check_surf[level][3*k+0] + leaf->x[0];
        check_coord[3*k+1] = up_check_surf[level][3*k+1] + leaf->x[1];
        check_coord[3*k+2] = up_check_surf[level][3*k+2] + leaf->x[2];
      }
      this->potential_P2P(leaf->src_coord, leaf->src_value, check_coord, leaf->up_equiv);
      // convert upward check potential to upward equivalent charge
      std::vector<T> buffer(nsurf_);
      std::vector<T> equiv(nsurf_);
      gemv(nsurf_, nsurf_, &matrix_UC2E_U[0], &(leaf->up_equiv[0]), &buffer[0]);
      gemv(nsurf_, nsurf_, &matrix_UC2E_V[0], &buffer[0], &equiv[0]);
      // scale the check-to-equivalent conversion (precomputation)
      for (int k=0; k<nsurf_; k++)
        leaf->up_equiv[k] = scale * equiv[k];
    }
  }

  //!
L2P operator: for every leaf, convert the downward-check potential to
//! downward-equivalent charges (DC2E pseudo-inverse, scaled by 2^-level),
//! then evaluate the targets' potential & gradient from those charges.
  void L2P(NodePtrs<T>& leafs) {
    int& nsurf_ = this->nsurf;
    real_t c[3] = {0.0};
    std::vector<RealVec> dn_equiv_surf;
    dn_equiv_surf.resize(this->depth+1);
    for (int level=0; level<=this->depth; level++) {
      // NOTE(review): resize is redundant -- the next assignment replaces the vector.
      dn_equiv_surf[level].resize(nsurf_*3);
      dn_equiv_surf[level] = surface(this->p, this->r0, level, c, 2.95);
    }
#pragma omp parallel for
    for (size_t i=0; i<leafs.size(); i++) {
      Node<T>* leaf = leafs[i];
      int level = leaf->level;
      real_t scale = pow(0.5, level);  // scaling factor of DC2DE precomputation matrix
      // convert downward check potential to downward equivalent charge
      std::vector<T> buffer(nsurf_);
      std::vector<T> equiv(nsurf_);
      gemv(nsurf_, nsurf_, &matrix_DC2E_U[0], &(leaf->dn_equiv[0]), &buffer[0]);
      gemv(nsurf_, nsurf_, &matrix_DC2E_V[0], &buffer[0], &equiv[0]);
      // scale the check-to-equivalent conversion (precomputation)
      for (int k=0; k<nsurf_; k++)
        leaf->dn_equiv[k] = scale * equiv[k];
      // calculate targets' potential & gradient induced by downward equivalent charge
      RealVec equiv_coord(nsurf_*3);
      for (int k=0; k<nsurf_; k++) {
        equiv_coord[3*k+0] = dn_equiv_surf[level][3*k+0] + leaf->x[0];
        equiv_coord[3*k+1] = dn_equiv_surf[level][3*k+1] + leaf->x[1];
        equiv_coord[3*k+2] = dn_equiv_surf[level][3*k+2] + leaf->x[2];
      }
      this->gradient_P2P(equiv_coord, leaf->dn_equiv, leaf->trg_coord, leaf->trg_value);
    }
  }

  //! M2M operator: post-order (children first, as OpenMP tasks) accumulation
  //! of each child's upward-equivalent charge into the parent's, using the
  //! precomputed per-octant M2M matrix.
  void M2M(Node<T>* node) {
    int& nsurf_ = this->nsurf;
    if (node->is_leaf) return;
    for (int octant=0; octant<8; octant++) {
      if (node->children[octant])
        #pragma omp task untied
        M2M(node->children[octant]);
    }
    #pragma omp taskwait
    // evaluate parent's upward equivalent charge from child's upward equivalent charge
    for (int octant=0; octant<8; octant++) {
      if (node->children[octant]) {
        Node<T>* child = node->children[octant];
        std::vector<T> buffer(nsurf_);
        gemv(nsurf_, nsurf_, &(matrix_M2M[octant][0]), &child->up_equiv[0], &buffer[0]);
        for (int k=0; k<nsurf_; k++) {
          node->up_equiv[k] += buffer[k];
        }
      }
    }
  }

  //!
L2L operator: pre-order propagation of the parent's downward-equivalent
//! charge into each child via the precomputed per-octant L2L matrix, then
//! recurse into children as OpenMP tasks.
  void L2L(Node<T>* node) {
    int& nsurf_ = this->nsurf;
    if (node->is_leaf) return;
    // evaluate child's downward check potential from parent's downward check potential
    for (int octant=0; octant<8; octant++) {
      if (node->children[octant]) {
        Node<T>* child = node->children[octant];
        std::vector<T> buffer(nsurf_);
        gemv(nsurf_, nsurf_, &(matrix_L2L[octant][0]), &node->dn_equiv[0], &buffer[0]);
        for (int k=0; k<nsurf_; k++)
          child->dn_equiv[k] += buffer[k];
      }
    }
    for (int octant=0; octant<8; octant++) {
      if (node->children[octant])
        #pragma omp task untied
        L2L(node->children[octant]);
    }
    #pragma omp taskwait
  }

  //! Build the M2L interaction bookkeeping (m2ldata): per-node FFT/IFFT
  //! offsets, per-target-node IFFT scale (2^(level+1)), and the
  //! cache-blocked interaction offset/count tables consumed by
  //! hadamard_product().
  void M2L_setup(NodePtrs<T> nonleafs) {
    int& nsurf_ = this->nsurf;
    int npos = REL_COORD[M2L_Type].size();  // number of M2L relative positions
    // construct lists of source nodes and target nodes for M2L operator
    NodePtrs<T>& trg_nodes = nonleafs;
    std::set<Node<T>*> src_nodes_;   // set used to deduplicate sources
    for (size_t i=0; i<trg_nodes.size(); i++) {
      NodePtrs<T>& M2L_list = trg_nodes[i]->M2L_list;
      for (int k=0; k<npos; k++) {
        if (M2L_list[k]) {
          src_nodes_.insert(M2L_list[k]);
        }
      }
    }
    NodePtrs<T> src_nodes;
    auto it = src_nodes_.begin();
    for (; it!=src_nodes_.end(); it++) {
      src_nodes.push_back(*it);
    }
    // prepare the indices of src_nodes & trg_nodes in all_up_equiv & all_dn_equiv
    std::vector<size_t> fft_offset(src_nodes.size());
    std::vector<size_t> ifft_offset(trg_nodes.size());
    RealVec ifft_scale(trg_nodes.size());
    for (size_t i=0; i<src_nodes.size(); i++) {
      fft_offset[i] = src_nodes[i]->children[0]->idx * nsurf_;
    }
    for (size_t i=0; i<trg_nodes.size(); i++) {
      int level = trg_nodes[i]->level+1;
      ifft_offset[i] = trg_nodes[i]->children[0]->idx * nsurf_;
      ifft_scale[i] = powf(2.0, level);   // scale-invariant M2L correction per level
    }
    // calculate interaction_offset_f & interaction_count_offset
    std::vector<size_t> interaction_offset_f;
    std::vector<size_t> interaction_count_offset;
    for (size_t i=0; i<src_nodes.size(); i++) {
      src_nodes[i]->idx_M2L = i;
    }
    size_t nblk_trg = trg_nodes.size() * sizeof(real_t) / CACHE_SIZE;  // block targets to fit cache
    if (nblk_trg==0) nblk_trg = 1;
    size_t interaction_count_offset_ = 0;
    size_t fft_size = 2 * NCHILD * this->nfreq;
    for (size_t iblk_trg=0; iblk_trg<nblk_trg; iblk_trg++) {
      size_t blk_start = (trg_nodes.size()* iblk_trg   ) / nblk_trg;
      size_t blk_end   = (trg_nodes.size()*(iblk_trg+1)) / nblk_trg;
      for (int k=0; k<npos; k++) {
        for (size_t i=blk_start; i<blk_end; i++) {
          NodePtrs<T>& M2L_list = trg_nodes[i]->M2L_list;
          if (M2L_list[k]) {
            // pairs of (source offset, target offset) into fft_in/fft_out
            interaction_offset_f.push_back(M2L_list[k]->idx_M2L * fft_size);
            interaction_offset_f.push_back(                   i * fft_size);
            interaction_count_offset_++;
          }
        }
        interaction_count_offset.push_back(interaction_count_offset_);  // cumulative counts
      }
    }
    m2ldata.fft_offset = fft_offset;
    m2ldata.ifft_offset = ifft_offset;
    m2ldata.ifft_scale = ifft_scale;
    m2ldata.interaction_offset_f = interaction_offset_f;
    m2ldata.interaction_count_offset = interaction_count_offset;
  }

  //! Frequency-domain Hadamard product: for every interaction pair,
  //! multiply the source's FFT'd equivalent charges by the precomputed
  //! M2L matrix for that relative position and accumulate into the target's
  //! FFT buffer. Interactions are processed in pairs (padded with a dummy
  //! zero vector so an odd count stays in bounds) via matmult_8x8x2.
  void hadamard_product(std::vector<size_t>& interaction_count_offset,
                        std::vector<size_t>& interaction_offset_f,
                        AlignedVec& fft_in, AlignedVec& fft_out) {
    size_t fft_size = 2 * NCHILD * this->nfreq;
    AlignedVec zero_vec0(fft_size, 0.);   // dummy input for odd interaction counts
    AlignedVec zero_vec1(fft_size, 0.);   // dummy output sink
    size_t npos = matrix_M2L.size();
    size_t nblk_inter = interaction_count_offset.size(); // num of blocks of interactions
    size_t nblk_trg = nblk_inter / npos;                 // num of blocks based on trg_nodes
    int BLOCK_SIZE = CACHE_SIZE * 2 / sizeof(real_t);
    std::vector<real_t*> IN_ (BLOCK_SIZE*nblk_inter);
    std::vector<real_t*> OUT_(BLOCK_SIZE*nblk_inter);
    // initialize fft_out with zero
#pragma omp parallel for
    for (size_t i=0; i<fft_out.capacity()/fft_size; ++i) {
      std::memset(fft_out.data()+i*fft_size, 0, fft_size*sizeof(real_t));
    }
    // gather input/output pointers per interaction block
#pragma omp parallel for
    for (size_t iblk_inter=0; iblk_inter<nblk_inter; iblk_inter++) {
      size_t interaction_count_offset0 = (iblk_inter==0 ? 0 : interaction_count_offset[iblk_inter-1]);
      size_t interaction_count_offset1 =                      interaction_count_offset[iblk_inter] ;
      size_t interact_count = interaction_count_offset1-interaction_count_offset0;
      for (size_t j=0; j<interact_count; j++) {
        IN_ [BLOCK_SIZE*iblk_inter+j] = &fft_in [interaction_offset_f[(interaction_count_offset0+j)*2+0]];
        OUT_[BLOCK_SIZE*iblk_inter+j] = &fft_out[interaction_offset_f[(interaction_count_offset0+j)*2+1]];
      }
      IN_ [BLOCK_SIZE*iblk_inter+interact_count] = &zero_vec0[0];
      OUT_[BLOCK_SIZE*iblk_inter+interact_count] = &zero_vec1[0];
    }
    for (size_t iblk_trg=0; iblk_trg<nblk_trg; iblk_trg++) {
#pragma omp parallel for
      for (int k=0; k<this->nfreq; k++) {
        for (size_t ipos=0; ipos<npos; ipos++) {
          size_t iblk_inter = iblk_trg*npos+ipos;
          size_t interaction_count_offset0 = (iblk_inter==0 ? 0 : interaction_count_offset[iblk_inter-1]);
          size_t interaction_count_offset1 =                      interaction_count_offset[iblk_inter] ;
          size_t interaction_count = interaction_count_offset1 - interaction_count_offset0;
          real_t** IN = &IN_ [BLOCK_SIZE*iblk_inter];
          real_t** OUT= &OUT_[BLOCK_SIZE*iblk_inter];
          real_t* M = &matrix_M2L[ipos][k*2*NCHILD*NCHILD]; // k-th freq's (row) offset in matrix_M2L[ipos]
          for (size_t j=0; j<interaction_count; j+=2) {
            real_t* M_   = M;
            real_t* IN0  = IN [j+0] + k*NCHILD*2;   // go to k-th freq chunk
            real_t* IN1  = IN [j+1] + k*NCHILD*2;
            real_t* OUT0 = OUT[j+0] + k*NCHILD*2;
            real_t* OUT1 = OUT[j+1] + k*NCHILD*2;
            matmult_8x8x2(M_, IN0, IN1, OUT0, OUT1);
          }
        }
      }
    }
    // add flop
    add_flop((long long)(8*8*8)*(interaction_offset_f.size()/2)*this->nfreq);
  }

  //! Forward FFT of upward-equivalent charges. No-op here; specialized for
  //! real_t later in this file.
  void fft_up_equiv(std::vector<size_t>& fft_offset,
                    RealVec& all_up_equiv, AlignedVec& fft_in) {}

  //! Inverse FFT back to downward-check potentials. No-op here; specialized
  //! for real_t later in this file.
  void ifft_dn_check(std::vector<size_t>& ifft_offset, RealVec& ifft_scal,
                     AlignedVec& fft_out, RealVec& all_dn_equiv) {}

  //! M2L operator: gather all up_equiv, FFT, Hadamard product with the
  //! precomputed M2L matrices, inverse FFT, then scatter back to dn_equiv.
  void M2L(Nodes<T>& nodes) {
    int& nsurf_ = this->nsurf;
    size_t fft_size = 2 * NCHILD * this->nfreq;
    int nnodes = nodes.size();
    // allocate memory
    std::vector<T> all_up_equiv, all_dn_equiv;
    // NOTE(review): reserve() only allocates capacity; indexing [0, size())
    // past size() below is formally UB even though it works with typical
    // allocators -- resize() would be the strictly-correct call. Confirm
    // this is an intentional trade-off to skip element construction.
    all_up_equiv.reserve(nnodes*nsurf_);  // use reserve() to avoid the overhead of calling constructor
    all_dn_equiv.reserve(nnodes*nsurf_);  // use pointer instead of iterator to access elements
    AlignedVec fft_in, fft_out;
    fft_in.reserve(m2ldata.fft_offset.size()*fft_size);
    fft_out.reserve(m2ldata.ifft_offset.size()*fft_size);
    // gather all upward equivalent charges
#pragma omp parallel for collapse(2)
    for (int i=0; i<nnodes; i++) {
      for (int j=0; j<nsurf_; j++) {
        all_up_equiv[i*nsurf_+j] = nodes[i].up_equiv[j];
        all_dn_equiv[i*nsurf_+j] = nodes[i].dn_equiv[j];
      }
    }
    fft_up_equiv(m2ldata.fft_offset, all_up_equiv, fft_in);
    hadamard_product(m2ldata.interaction_count_offset, m2ldata.interaction_offset_f, fft_in, fft_out);
    ifft_dn_check(m2ldata.ifft_offset, m2ldata.ifft_scale, fft_out, all_dn_equiv);
    // scatter all downward check potentials
#pragma omp parallel for collapse(2)
    for (int i=0; i<nnodes; i++) {
      for (int j=0; j<nsurf_; j++) {
        nodes[i].dn_equiv[j] = all_dn_equiv[i*nsurf_+j];
      }
    }
  }
};

/** Below are member function specializations */

//! Real-valued UC2E/DC2E precomputation: build the level-0 check-to-equiv
//! kernel matrix, SVD it, and store a regularized pseudo-inverse
//! (singular values below 4*EPS*max are zeroed) as two factors each.
template <>
void FmmScaleInvariant<real_t>::precompute_check2equiv() {
  int level = 0;
  real_t c[3] = {0, 0, 0};
  int& nsurf_ = this->nsurf;
  // compute kernel matrix
  RealVec up_check_surf = surface(this->p, this->r0, level, c, 2.95);
  RealVec up_equiv_surf = surface(this->p, this->r0, level, c, 1.05);
  RealVec matrix_c2e(nsurf_*nsurf_);  // UC2UE
  this->kernel_matrix(up_check_surf, up_equiv_surf, matrix_c2e);
  // svd
  RealVec S(nsurf_*nsurf_);  // singular values (stored on the diagonal)
  RealVec U(nsurf_*nsurf_), VH(nsurf_*nsurf_);
  svd(nsurf_, nsurf_, &matrix_c2e[0], &S[0], &U[0], &VH[0]);
  // pseudo-inverse: invert singular values above the relative threshold
  real_t max_S = 0;
  for (int i=0; i<nsurf_; i++) {
    max_S = fabs(S[i*nsurf_+i])>max_S ? fabs(S[i*nsurf_+i]) : max_S;
  }
  for (int i=0; i<nsurf_; i++) {
    S[i*nsurf_+i] = S[i*nsurf_+i]>EPS*max_S*4 ? 1.0/S[i*nsurf_+i] : 0.0;
  }
  RealVec V = transpose(VH, nsurf_, nsurf_);
  matrix_UC2E_U = transpose(U, nsurf_, nsurf_);
  gemm(nsurf_, nsurf_, nsurf_, &V[0], &S[0], &matrix_UC2E_V[0]);
  matrix_DC2E_U = VH;
  gemm(nsurf_, nsurf_, nsurf_, &U[0], &S[0], &matrix_DC2E_V[0]);
}

//! Complex-valued UC2E/DC2E precomputation: same pseudo-inverse construction
//! as the real specialization, but using conjugate transposes and a complex
//! copy of the (real) singular values.
template <>
void FmmScaleInvariant<complex_t>::precompute_check2equiv() {
  int level = 0;
  real_t c[3] = {0, 0, 0};
  int& nsurf_ = this->nsurf;
  // compute kernel matrix
  RealVec up_check_surf = surface(this->p, this->r0, level, c, 2.95);
  RealVec up_equiv_surf = surface(this->p, this->r0, level, c, 1.05);
  ComplexVec matrix_c2e(nsurf_*nsurf_);  // UC2UE
  this->kernel_matrix(up_check_surf, up_equiv_surf, matrix_c2e);
  // svd
  RealVec S(nsurf_*nsurf_);  // singular values (real, on the diagonal)
  ComplexVec U(nsurf_*nsurf_), VH(nsurf_*nsurf_);
  svd(nsurf_, nsurf_, &matrix_c2e[0], &S[0], &U[0], &VH[0]);
  // pseudo-inverse
  real_t max_S = 0;
  for (int i=0; i<nsurf_; i++) {
    max_S = fabs(S[i*nsurf_+i])>max_S ? fabs(S[i*nsurf_+i]) : max_S;
  }
  for (int i=0; i<nsurf_; i++) {
    S[i*nsurf_+i] = S[i*nsurf_+i]>EPS*max_S*4 ? 1.0/S[i*nsurf_+i] : 0.0;
  }
  ComplexVec S_(nsurf_*nsurf_);
  for (size_t i=0; i<S_.size(); i++) {   // convert S to complex type
    S_[i] = S[i];
  }
  ComplexVec V = conjugate_transpose(VH, nsurf_, nsurf_);
  ComplexVec UH = conjugate_transpose(U, nsurf_, nsurf_);
  matrix_UC2E_U = UH;
  gemm(nsurf_, nsurf_, nsurf_, &V[0], &S_[0], &matrix_UC2E_V[0]);
  matrix_DC2E_U = transpose(V, nsurf_, nsurf_);
  ComplexVec UHT = transpose(UH, nsurf_, nsurf_);
  gemm(nsurf_, nsurf_, nsurf_, &UHT[0], &S_[0], &matrix_DC2E_V[0]);
}

//!
member function specialization for real type.
//! Precompute frequency-domain M2L matrices: evaluate the kernel on each
//! relative position's convolution grid, FFT it, then reorder into the
//! per-frequency NCHILD x NCHILD complex layout used by hadamard_product().
template <>
void FmmScaleInvariant<real_t>::precompute_M2L() {
  int n1 = this->p * 2;
  int& nconv_ = this->nconv;
  int& nfreq_ = this->nfreq;
  std::vector<RealVec> matrix_M2L_Helper(REL_COORD[M2L_Helper_Type].size(), RealVec(2*nfreq_));
  // create fft plan
  RealVec fftw_in(nconv_);
  RealVec fftw_out(2*nfreq_);
  int dim[3] = {n1, n1, n1};
  fft_plan plan = fft_plan_dft_r2c(3, dim, fftw_in.data(),
                                   reinterpret_cast<fft_complex*>(fftw_out.data()),
                                   FFTW_ESTIMATE);
  // compute M2L kernel matrix, perform DFT
  // NOTE(review): the shared plan is executed from multiple threads via the
  // new-array execute interface -- confirm this matches FFTW's thread-safety
  // requirements for fftw_execute_dft_r2c.
  RealVec trg_coord(3,0);
#pragma omp parallel for
  for (size_t i=0; i<REL_COORD[M2L_Helper_Type].size(); ++i) {
    real_t coord[3];
    for (int d=0; d<3; d++) {
      coord[d] = REL_COORD[M2L_Helper_Type][i][d] * this->r0 / 0.5;  // relative coords
    }
    RealVec conv_coord = convolution_grid(this->p, this->r0, 0, coord); // convolution grid
    RealVec conv_value(nconv_);  // potentials on convolution grid
    this->kernel_matrix(conv_coord, trg_coord, conv_value);
    fft_execute_dft_r2c(plan, conv_value.data(),
                        reinterpret_cast<fft_complex*>(matrix_M2L_Helper[i].data()));
  }
  // convert M2L_Helper to M2L and reorder data layout to improve locality
#pragma omp parallel for
  for (size_t i=0; i<REL_COORD[M2L_Type].size(); ++i) {
    for (int j=0; j<NCHILD*NCHILD; j++) {  // loop over child's relative positions
      int child_rel_idx = M2L_INDEX_MAP[i][j];
      if (child_rel_idx != -1) {
        for (int k=0; k<nfreq_; k++) {     // loop over frequencies
          int new_idx = k*(2*NCHILD*NCHILD) + 2*j;
          // division by nconv_ folds in the FFT normalization
          matrix_M2L[i][new_idx+0] = matrix_M2L_Helper[child_rel_idx][k*2+0] / nconv_; // real
          matrix_M2L[i][new_idx+1] = matrix_M2L_Helper[child_rel_idx][k*2+1] / nconv_; // imag
        }
      }
    }
  }
  // destroy fftw plan
  fft_destroy_plan(plan);
}

//! Forward FFT of the 8 children's upward-equivalent charges of every
//! source node, scattered onto the convolution grid and transposed to
//! frequency-major layout for the Hadamard stage.
template <>
void FmmScaleInvariant<real_t>::fft_up_equiv(std::vector<size_t>& fft_offset,
                                             RealVec& all_up_equiv, AlignedVec& fft_in) {
  int& nsurf_ = this->nsurf;
  int& nconv_ = this->nconv;
  int& nfreq_ = this->nfreq;
  int n1 = 2 * this->p;
  auto map = generate_surf2conv_up(this->p);   // surface index -> convolution-grid index
  size_t fft_size = 2 * NCHILD * nfreq_;
  AlignedVec fftw_in(nconv_ * NCHILD);
  AlignedVec fftw_out(fft_size);
  int dim[3] = {n1, n1, n1};
  fft_plan plan = fft_plan_many_dft_r2c(3, dim, NCHILD,
                                        (real_t*)&fftw_in[0], nullptr, 1, nconv_,
                                        (fft_complex*)(&fftw_out[0]), nullptr, 1, nfreq_,
                                        FFTW_ESTIMATE);
#pragma omp parallel for
  for (size_t node_idx=0; node_idx<fft_offset.size(); node_idx++) {
    RealVec buffer(fft_size, 0);
    real_t* up_equiv = &all_up_equiv[fft_offset[node_idx]];  // offset ptr of node's 8 child's upward_equiv in all_up_equiv, size=8*nsurf_
    // upward_equiv_fft (input of r2c) here should have a size of N3*NCHILD
    // the node_idx's chunk of fft_out has a size of 2*N3_*NCHILD
    // since it's larger than what we need, we can use fft_out as fftw_in buffer here
    real_t* up_equiv_f = &fft_in[fft_size*node_idx]; // offset ptr of node_idx in fft_in vector, size=fft_size
    std::memset(up_equiv_f, 0, fft_size*sizeof(real_t));  // initialize fft_in to 0
    for (int k=0; k<nsurf_; k++) {
      size_t idx = map[k];
      for (int j=0; j<NCHILD; j++)
        up_equiv_f[idx+j*nconv_] = up_equiv[j*nsurf_+k];
    }
    fft_execute_dft_r2c(plan, up_equiv_f, (fft_complex*)&buffer[0]);
    // add flop
    double add, mul, fma;
    fft_flops(plan, &add, &mul, &fma);
    add_flop((long long)(add + mul + 2*fma));
    // transpose from child-major to frequency-major layout
    for (int k=0; k<nfreq_; k++) {
      for (int j=0; j<NCHILD; j++) {
        up_equiv_f[2*(NCHILD*k+j)+0] = buffer[2*(nfreq_*j+k)+0];
        up_equiv_f[2*(NCHILD*k+j)+1] = buffer[2*(nfreq_*j+k)+1];
      }
    }
  }
  fft_destroy_plan(plan);
}

//! Inverse FFT: transpose each target node's frequency-major spectrum back
//! to child-major, c2r transform, then gather from the convolution grid
//! into the children's downward-equivalent charges, applying the per-node
//! level scale.
template <>
void FmmScaleInvariant<real_t>::ifft_dn_check(std::vector<size_t>& ifft_offset, RealVec& ifft_scal,
                                              AlignedVec& fft_out, RealVec& all_dn_equiv) {
  int& nsurf_ = this->nsurf;
  int& nconv_ = this->nconv;
  int& nfreq_ = this->nfreq;
  int n1 = 2 * this->p;
  auto map = generate_surf2conv_dn(this->p);   // surface index -> convolution-grid index
  size_t fft_size = 2 * NCHILD * nfreq_;
  AlignedVec fftw_in(fft_size);
  AlignedVec fftw_out(nconv_ * NCHILD);
  int dim[3] = {n1, n1, n1};
  fft_plan plan = fft_plan_many_dft_c2r(3, dim, NCHILD,
                                        (fft_complex*)&fftw_in[0], nullptr, 1, nfreq_,
                                        (real_t*)(&fftw_out[0]), nullptr, 1, nconv_,
                                        FFTW_ESTIMATE);
#pragma omp parallel for
  for (size_t node_idx=0; node_idx<ifft_offset.size(); node_idx++) {
    RealVec buffer0(fft_size, 0);
    RealVec buffer1(fft_size, 0);
    real_t* dn_check_f = &fft_out[fft_size*node_idx]; // offset ptr for node_idx in fft_out vector, size=fft_size
    real_t* dn_equiv = &all_dn_equiv[ifft_offset[node_idx]]; // offset ptr for node_idx's child's dn_equiv in all_dn_equiv, size=numChilds * nsurf_
    for (int k=0; k<nfreq_; k++)
      for (int j=0; j<NCHILD; j++) {
        buffer0[2*(nfreq_*j+k)+0] = dn_check_f[2*(NCHILD*k+j)+0];
        buffer0[2*(nfreq_*j+k)+1] = dn_check_f[2*(NCHILD*k+j)+1];
      }
    fft_execute_dft_c2r(plan, (fft_complex*)&buffer0[0], (real_t*)&buffer1[0]);
    // add flop
    double add, mul, fma;
    fft_flops(plan, &add, &mul, &fma);
    add_flop((long long)(add + mul + 2*fma));
    for (int k=0; k<nsurf_; k++) {
      size_t idx = map[k];
      for (int j=0; j<NCHILD; j++)
        dn_equiv[nsurf_*j+k] += buffer1[idx+j*nconv_] * ifft_scal[node_idx];
    }
  }
  fft_destroy_plan(plan);
}
}  // end namespace
#endif
dz1z3.c
// bmp.h #include "stdio.h" #include "stdlib.h" typedef struct{ unsigned char B; unsigned char G; unsigned char R; } RGB; typedef struct { unsigned int filesz; unsigned short creator1; unsigned short creator2; unsigned int bmp_offset; } bmpfile_header_t; typedef struct { unsigned int header_sz; unsigned int width; unsigned int height; unsigned short nplanes; unsigned short bitspp; unsigned int compress_type; unsigned int bmp_bytesz; unsigned int hres; unsigned int vres; unsigned int ncolors; unsigned int nimpcolors; } bmp_dib_header_t; typedef enum { BI_RGB = 0, BI_RLE8, BI_RLE4, BI_BITFIELDS, BI_JPEG, BI_PNG, } bmp_compression_method_t; typedef struct{ unsigned char magic[2]; bmpfile_header_t file_header; bmp_dib_header_t dib_header; unsigned int* palette; void* pixel_map; } bmp_image; void create_bmp(RGB* bitmap, int height, int width, const char* filename){ bmp_image image; int padded_width = 4*(((width*24)+31)/32); padded_width -= width*sizeof(RGB); char* pad = (char*) calloc (padded_width, sizeof(char)); image.magic[0]='B'; image.magic[1]='M'; image.file_header.filesz = 2*sizeof(char) + sizeof(bmpfile_header_t) + sizeof(bmp_dib_header_t) + height*width*sizeof(RGB); image.file_header.creator1 = image.file_header.creator2 = 0; image.file_header.bmp_offset = 2*sizeof(char) + sizeof(bmpfile_header_t) + sizeof(bmp_dib_header_t); image.dib_header.header_sz = 40;//sizeof(bmp_dib_header_t); image.dib_header.width = width; image.dib_header.height = height; image.dib_header.nplanes = 1; image.dib_header.bitspp = 24; image.dib_header.compress_type = 0; image.dib_header.bmp_bytesz = width*height*sizeof(RGB); image.dib_header.hres = 0; image.dib_header.vres = 0; image.dib_header.ncolors = 0; image.dib_header.nimpcolors = 0; FILE* out_file = fopen(filename,"wb"); fwrite(image.magic,sizeof(char),2,out_file); fwrite(&(image.file_header),sizeof(char),sizeof(bmpfile_header_t),out_file); fwrite(&(image.dib_header),sizeof(char),sizeof(bmp_dib_header_t),out_file); int h; for (h = 
height-1; h >= 0; h--){ fwrite(&bitmap[h*width],sizeof(RGB),width,out_file); fwrite(pad,sizeof(char),padded_width,out_file); } fclose(out_file); } // end bmp.h // utils.h #ifndef _HEADER #define _HEADER #ifdef __cplusplus extern "C" { #endif #include <unistd.h> /* Command line parameters for benchmarks */ struct pb_Parameters { char *outFile; /* If not NULL, the raw output of the * computation should be saved to this * file. The string is owned. */ char **inpFiles; /* A NULL-terminated array of strings * holding the input file(s) for the * computation. The array and strings * are owned. */ }; /* Read command-line parameters. * * The argc and argv parameters to main are read, and any parameters * interpreted by this function are removed from the argument list. * * A new instance of struct pb_Parameters is returned. * If there is an error, then an error message is printed on stderr * and NULL is returned. */ struct pb_Parameters * pb_ReadParameters(int *_argc, char **argv); /* Free an instance of struct pb_Parameters. */ void pb_FreeParameters(struct pb_Parameters *p); /* Count the number of input files in a pb_Parameters instance. */ int pb_Parameters_CountInputs(struct pb_Parameters *p); /* A time or duration. */ #if _POSIX_VERSION >= 200112L typedef unsigned long long pb_Timestamp; /* time in microseconds */ #else # error "Timestamps not implemented" #endif enum pb_TimerState { pb_Timer_STOPPED, pb_Timer_RUNNING, }; struct pb_Timer { enum pb_TimerState state; pb_Timestamp elapsed; /* Amount of time elapsed so far */ pb_Timestamp init; /* Beginning of the current time interval, * if state is RUNNING. End of the last * recorded time interfal otherwise. */ }; /* Reset a timer. * Use this to initialize a timer or to clear * its elapsed time. The reset timer is stopped. */ void pb_ResetTimer(struct pb_Timer *timer); /* Start a timer. The timer is set to RUNNING mode and * time elapsed while the timer is running is added to * the timer. 
 * The timer should not already be running. */
void pb_StartTimer(struct pb_Timer *timer);

/* Stop a timer.
 * This stops adding elapsed time to the timer.
 * The timer should not already be stopped. */
void pb_StopTimer(struct pb_Timer *timer);

/* Get the elapsed time in seconds. */
double pb_GetElapsedTime(struct pb_Timer *timer);

/* Execution time is assigned to one of these categories. */
enum pb_TimerID {
  pb_TimerID_NONE = 0,
  pb_TimerID_IO,          /* Time spent in input/output */
  pb_TimerID_KERNEL,      /* Time spent computing on the device,
                           * recorded asynchronously */
  pb_TimerID_COPY,        /* Time spent synchronously moving data
                           * to/from device and allocating/freeing
                           * memory on the device */
  pb_TimerID_DRIVER,      /* Time spent in the host interacting with the
                           * driver, primarily for recording the time
                           * spent queueing asynchronous operations */
  pb_TimerID_COPY_ASYNC,  /* Time spent in asynchronous transfers */
  pb_TimerID_COMPUTE,     /* Time for all program execution other
                           * than parsing command line arguments,
                           * I/O, kernel, and copy */
  pb_TimerID_OVERLAP,     /* Time double-counted in asynchronous and
                           * host activity: automatically filled in,
                           * not intended for direct usage */
  pb_TimerID_LAST         /* Number of timer IDs */
};

/* Dynamic list of asynchronously tracked times between events */
struct pb_async_time_marker_list {
  char *label;               // actually just a pointer to a string
  enum pb_TimerID timerID;   /* The ID to which the interval beginning
                              * with this marker should be attributed */
  void * marker;             //cudaEvent_t marker; /* The driver event for this marker */
  struct pb_async_time_marker_list *next;
};

/* One labelled sub-interval within a timer category. */
struct pb_SubTimer {
  char *label;
  struct pb_Timer timer;
  struct pb_SubTimer *next;
};

/* Singly-linked list of sub-timers plus the currently active one. */
struct pb_SubTimerList {
  struct pb_SubTimer *current;
  struct pb_SubTimer *subtimer_list;
};

/* A set of timers for recording execution times.
 */
struct pb_TimerSet {
  enum pb_TimerID current;
  struct pb_async_time_marker_list* async_markers;
  pb_Timestamp async_begin;
  pb_Timestamp wall_begin;
  struct pb_Timer timers[pb_TimerID_LAST];
  struct pb_SubTimerList *sub_timer_list[pb_TimerID_LAST];
};

/* Reset all timers in the set. */
void pb_InitializeTimerSet(struct pb_TimerSet *timers);

void pb_AddSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID pb_Category);

/* Select which timer the next interval of time should be accounted
 * to. The selected timer is started and other timers are stopped.
 * Using pb_TimerID_NONE stops all timers. */
void pb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer);

void pb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category);

/* Print timer values to standard output. */
void pb_PrintTimerSet(struct pb_TimerSet *timers, double *time);

/* Release timer resources */
void pb_DestroyTimerSet(struct pb_TimerSet * timers);

void pb_SetOpenCL(void *clContextPtr, void *clCommandQueuePtr);

#ifdef __cplusplus
}
#endif
#endif
// end utils.h

// utils.c
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if _POSIX_VERSION >= 200112L
# include <sys/time.h>
#endif

/* Free an array of owned strings. */
static void
free_string_array(char **string_array)
{
  char **p;
  if (!string_array) return;
  for (p = string_array; *p; p++) free(*p);
  free(string_array);
}

/* Parse a comma-delimited list of strings into an
 * array of strings.
*/ static char ** read_string_array(char *in) { char **ret; int i; int count; /* Number of items in the input */ char *substring; /* Current substring within 'in' */ /* Count the number of items in the string */ count = 1; for (i = 0; in[i]; i++) if (in[i] == ',') count++; /* Allocate storage */ ret = (char **)malloc((count + 1) * sizeof(char *)); /* Create copies of the strings from the list */ substring = in; for (i = 0; i < count; i++) { char *substring_end; int substring_length; /* Find length of substring */ for (substring_end = substring; (*substring_end != ',') && (*substring_end != 0); substring_end++); substring_length = substring_end - substring; /* Allocate memory and copy the substring */ ret[i] = (char *)malloc(substring_length + 1); memcpy(ret[i], substring, substring_length); ret[i][substring_length] = 0; /* go to next substring */ substring = substring_end + 1; } ret[i] = NULL; /* Write the sentinel value */ return ret; } struct argparse { int argc; /* Number of arguments. Mutable. */ char **argv; /* Argument values. Immutable. */ int argn; /* Current argument number. */ char **argv_get; /* Argument value being read. */ char **argv_put; /* Argument value being written. * argv_put <= argv_get. */ }; static void initialize_argparse(struct argparse *ap, int argc, char **argv) { ap->argc = argc; ap->argn = 0; ap->argv_get = ap->argv_put = ap->argv = argv; } static void finalize_argparse(struct argparse *ap) { /* Move the remaining arguments */ for(; ap->argn < ap->argc; ap->argn++) *ap->argv_put++ = *ap->argv_get++; } /* Delete the current argument. */ static void delete_argument(struct argparse *ap) { if (ap->argn >= ap->argc) { fprintf(stderr, "delete_argument\n"); } ap->argc--; ap->argv_get++; } /* Go to the next argument. Also, move the current argument to its * final location in argv. */ static void next_argument(struct argparse *ap) { if (ap->argn >= ap->argc) { fprintf(stderr, "next_argument\n"); } /* Move argument to its new location. 
*/ *ap->argv_put++ = *ap->argv_get++; ap->argn++; } static int is_end_of_arguments(struct argparse *ap) { return ap->argn == ap->argc; } static char * get_argument(struct argparse *ap) { return *ap->argv_get; } static char * consume_argument(struct argparse *ap) { char *ret = get_argument(ap); delete_argument(ap); return ret; } struct pb_Parameters * pb_ReadParameters(int *_argc, char **argv) { char *err_message; struct argparse ap; struct pb_Parameters *ret = (struct pb_Parameters *)malloc(sizeof(struct pb_Parameters)); /* Initialize the parameters structure */ ret->outFile = NULL; ret->inpFiles = (char **)malloc(sizeof(char *)); ret->inpFiles[0] = NULL; /* Each argument */ initialize_argparse(&ap, *_argc, argv); while(!is_end_of_arguments(&ap)) { char *arg = get_argument(&ap); /* Single-character flag */ if ((arg[0] == '-') && (arg[1] != 0) && (arg[2] == 0)) { delete_argument(&ap); /* This argument is consumed here */ switch(arg[1]) { case 'o': /* Output file name */ if (is_end_of_arguments(&ap)) { err_message = "Expecting file name after '-o'\n"; goto error; } free(ret->outFile); ret->outFile = strdup(consume_argument(&ap)); break; case 'i': /* Input file name */ if (is_end_of_arguments(&ap)) { err_message = "Expecting file name after '-i'\n"; goto error; } ret->inpFiles = read_string_array(consume_argument(&ap)); break; case '-': /* End of options */ goto end_of_options; default: err_message = "Unexpected command-line parameter\n"; goto error; } } else { /* Other parameters are ignored */ next_argument(&ap); } } /* end for each argument */ end_of_options: *_argc = ap.argc; /* Save the modified argc value */ finalize_argparse(&ap); return ret; error: fputs(err_message, stderr); pb_FreeParameters(ret); return NULL; } void pb_FreeParameters(struct pb_Parameters *p) { char **cpp; free(p->outFile); free_string_array(p->inpFiles); free(p); } int pb_Parameters_CountInputs(struct pb_Parameters *p) { int n; for (n = 0; p->inpFiles[n]; n++); return n; } 
/*****************************************************************************/ /* Timer routines */ static void accumulate_time(pb_Timestamp *accum, pb_Timestamp start, pb_Timestamp end) { #if _POSIX_VERSION >= 200112L *accum += end - start; #else # error "Timestamps not implemented for this system" #endif } #if _POSIX_VERSION >= 200112L static pb_Timestamp get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (pb_Timestamp) (tv.tv_sec * 1000000LL + tv.tv_usec); } #else # error "no supported time libraries are available on this platform" #endif void pb_ResetTimer(struct pb_Timer *timer) { timer->state = pb_Timer_STOPPED; #if _POSIX_VERSION >= 200112L timer->elapsed = 0; #else # error "pb_ResetTimer: not implemented for this system" #endif } void pb_StartTimer(struct pb_Timer *timer) { if (timer->state != pb_Timer_STOPPED) { fputs("Ignoring attempt to start a running timer\n", stderr); return; } timer->state = pb_Timer_RUNNING; #if _POSIX_VERSION >= 200112L { struct timeval tv; gettimeofday(&tv, NULL); timer->init = tv.tv_sec * 1000000LL + tv.tv_usec; } #else # error "pb_StartTimer: not implemented for this system" #endif } void pb_StartTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer) { unsigned int numNotStopped = 0x3; // 11 if (timer->state != pb_Timer_STOPPED) { fputs("Warning: Timer was not stopped\n", stderr); numNotStopped &= 0x1; // Zero out 2^1 } if (subtimer->state != pb_Timer_STOPPED) { fputs("Warning: Subtimer was not stopped\n", stderr); numNotStopped &= 0x2; // Zero out 2^0 } if (numNotStopped == 0x0) { fputs("Ignoring attempt to start running timer and subtimer\n", stderr); return; } timer->state = pb_Timer_RUNNING; subtimer->state = pb_Timer_RUNNING; #if _POSIX_VERSION >= 200112L { struct timeval tv; gettimeofday(&tv, NULL); if (numNotStopped & 0x2) { timer->init = tv.tv_sec * 1000000LL + tv.tv_usec; } if (numNotStopped & 0x1) { subtimer->init = tv.tv_sec * 1000000LL + tv.tv_usec; } } #else # error "pb_StartTimer: not 
implemented for this system" #endif } void pb_StopTimer(struct pb_Timer *timer) { pb_Timestamp fini; if (timer->state != pb_Timer_RUNNING) { fputs("Ignoring attempt to stop a stopped timer\n", stderr); return; } timer->state = pb_Timer_STOPPED; #if _POSIX_VERSION >= 200112L { struct timeval tv; gettimeofday(&tv, NULL); fini = tv.tv_sec * 1000000LL + tv.tv_usec; } #else # error "pb_StopTimer: not implemented for this system" #endif accumulate_time(&timer->elapsed, timer->init, fini); timer->init = fini; } void pb_StopTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer) { pb_Timestamp fini; unsigned int numNotRunning = 0x3; // 0b11 if (timer->state != pb_Timer_RUNNING) { fputs("Warning: Timer was not running\n", stderr); numNotRunning &= 0x1; // Zero out 2^1 } if (subtimer->state != pb_Timer_RUNNING) { fputs("Warning: Subtimer was not running\n", stderr); numNotRunning &= 0x2; // Zero out 2^0 } if (numNotRunning == 0x0) { fputs("Ignoring attempt to stop stopped timer and subtimer\n", stderr); return; } timer->state = pb_Timer_STOPPED; subtimer->state = pb_Timer_STOPPED; #if _POSIX_VERSION >= 200112L { struct timeval tv; gettimeofday(&tv, NULL); fini = tv.tv_sec * 1000000LL + tv.tv_usec; } #else # error "pb_StopTimer: not implemented for this system" #endif if (numNotRunning & 0x2) { accumulate_time(&timer->elapsed, timer->init, fini); timer->init = fini; } if (numNotRunning & 0x1) { accumulate_time(&subtimer->elapsed, subtimer->init, fini); subtimer->init = fini; } } /* Get the elapsed time in seconds. 
*/ double pb_GetElapsedTime(struct pb_Timer *timer) { double ret; if (timer->state != pb_Timer_STOPPED) { fputs("Elapsed time from a running timer is inaccurate\n", stderr); } #if _POSIX_VERSION >= 200112L ret = timer->elapsed / 1e6; #else # error "pb_GetElapsedTime: not implemented for this system" #endif return ret; } void pb_InitializeTimerSet(struct pb_TimerSet *timers) { int n; timers->wall_begin = get_time(); timers->current = pb_TimerID_NONE; timers->async_markers = NULL; for (n = 0; n < pb_TimerID_LAST; n++) { pb_ResetTimer(&timers->timers[n]); timers->sub_timer_list[n] = NULL; // free first? } } void pb_AddSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID pb_Category) { struct pb_SubTimer *subtimer = (struct pb_SubTimer *) malloc (sizeof(struct pb_SubTimer)); int len = strlen(label); subtimer->label = (char *) malloc (sizeof(char)*(len+1)); sprintf(subtimer->label, "%s\0", label); pb_ResetTimer(&subtimer->timer); subtimer->next = NULL; struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[pb_Category]; if (subtimerlist == NULL) { subtimerlist = (struct pb_SubTimerList *) malloc (sizeof(struct pb_SubTimerList)); subtimerlist->subtimer_list = subtimer; timers->sub_timer_list[pb_Category] = subtimerlist; } else { // Append to list struct pb_SubTimer *element = subtimerlist->subtimer_list; while (element->next != NULL) { element = element->next; } element->next = subtimer; } } void pb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category) { // switchToSub( NULL, NONE // switchToSub( NULL, some // switchToSub( some, some // switchToSub( some, NONE -- tries to find "some" in NONE's sublist, which won't be printed struct pb_Timer *topLevelToStop = NULL; if (timers->current != category && timers->current != pb_TimerID_NONE) { // Switching to subtimer in a different category needs to stop the top-level current, different categoried timer. 
// NONE shouldn't have a timer associated with it, so exclude from branch topLevelToStop = &timers->timers[timers->current]; } struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current]; struct pb_SubTimer *curr = (subtimerlist == NULL) ? NULL : subtimerlist->current; if (timers->current != pb_TimerID_NONE) { if (curr != NULL && topLevelToStop != NULL) { pb_StopTimerAndSubTimer(topLevelToStop, &curr->timer); } else if (curr != NULL) { pb_StopTimer(&curr->timer); } else { pb_StopTimer(topLevelToStop); } } subtimerlist = timers->sub_timer_list[category]; struct pb_SubTimer *subtimer = NULL; if (label != NULL) { subtimer = subtimerlist->subtimer_list; while (subtimer != NULL) { if (strcmp(subtimer->label, label) == 0) { break; } else { subtimer = subtimer->next; } } } if (category != pb_TimerID_NONE) { if (subtimerlist != NULL) { subtimerlist->current = subtimer; } if (category != timers->current && subtimer != NULL) { pb_StartTimerAndSubTimer(&timers->timers[category], &subtimer->timer); } else if (subtimer != NULL) { // Same category, different non-NULL subtimer pb_StartTimer(&subtimer->timer); } else{ // Different category, but no subtimer (not found or specified as NULL) -- unprefered way of setting topLevel timer pb_StartTimer(&timers->timers[category]); } } timers->current = category; } void pb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer) { /* Stop the currently running timer */ if (timers->current != pb_TimerID_NONE) { struct pb_SubTimer *currSubTimer = NULL; struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current]; if ( subtimerlist != NULL) { currSubTimer = timers->sub_timer_list[timers->current]->current; } if ( currSubTimer!= NULL) { pb_StopTimerAndSubTimer(&timers->timers[timers->current], &currSubTimer->timer); } else { pb_StopTimer(&timers->timers[timers->current]); } } timers->current = timer; if (timer != pb_TimerID_NONE) { pb_StartTimer(&timers->timers[timer]); } } void 
pb_PrintTimerSet(struct pb_TimerSet *timers, double *time) { pb_Timestamp wall_end = get_time(); struct pb_Timer *t = timers->timers; struct pb_SubTimer* sub = NULL; int maxSubLength; const char *categories[] = { "IO", "Kernel", "Copy", "Driver", "Copy Async", "Compute" }; const int maxCategoryLength = 10; int i; for(i = 1; i < pb_TimerID_LAST-1; ++i) { // exclude NONE and OVRELAP from this format if(pb_GetElapsedTime(&t[i]) != 0) { // Print Category Timer printf("%-*s: %f\n", maxCategoryLength, categories[i-1], pb_GetElapsedTime(&t[i])); if (timers->sub_timer_list[i] != NULL) { sub = timers->sub_timer_list[i]->subtimer_list; maxSubLength = 0; while (sub != NULL) { // Find longest SubTimer label if (strlen(sub->label) > maxSubLength) { maxSubLength = strlen(sub->label); } sub = sub->next; } // Fit to Categories if (maxSubLength <= maxCategoryLength) { maxSubLength = maxCategoryLength; } sub = timers->sub_timer_list[i]->subtimer_list; // Print SubTimers while (sub != NULL) { printf(" -%-*s: %f\n", maxSubLength, sub->label, pb_GetElapsedTime(&sub->timer)); sub = sub->next; } } } } if(pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]) != 0) printf("CPU/Kernel Overlap: %f\n", pb_GetElapsedTime(&t[pb_TimerID_OVERLAP])); float walltime = (wall_end - timers->wall_begin)/ 1e6; printf("Timer Wall Time: %f\n", walltime); (*time) = walltime; } //void //pb_PrintTimerSet(struct pb_TimerSet *timers) //{ // // pb_Timestamp wall_end = get_time(); // // struct pb_Timer *t = timers->timers; // struct pb_SubTimer* sub = NULL; // // int maxSubLength; // // const char *categories[] = { // "IO", "Kernel", "Copy", "Driver", "Copy Async", "Compute" // }; // // const int maxCategoryLength = 10; // // int i; // for(i = 1; i < pb_TimerID_LAST-1; ++i) { // exclude NONE and OVRELAP from this format // if(pb_GetElapsedTime(&t[i]) != 0) { // // // Print Category Timer // printf("%-*s: %f\n", maxCategoryLength, categories[i-1], pb_GetElapsedTime(&t[i])); // // if (timers->sub_timer_list[i] != NULL) { // 
sub = timers->sub_timer_list[i]->subtimer_list; // maxSubLength = 0; // while (sub != NULL) { // // Find longest SubTimer label // if (strlen(sub->label) > maxSubLength) { // maxSubLength = strlen(sub->label); // } // sub = sub->next; // } // // // Fit to Categories // if (maxSubLength <= maxCategoryLength) { // maxSubLength = maxCategoryLength; // } // // sub = timers->sub_timer_list[i]->subtimer_list; // // // Print SubTimers // while (sub != NULL) { // printf(" -%-*s: %f\n", maxSubLength, sub->label, pb_GetElapsedTime(&sub->timer)); // sub = sub->next; // } // } // } // } // // if(pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]) != 0) // printf("CPU/Kernel Overlap: %f\n", pb_GetElapsedTime(&t[pb_TimerID_OVERLAP])); // // float walltime = (wall_end - timers->wall_begin)/ 1e6; // printf("Timer Wall Time: %f\n", walltime); // //} void pb_DestroyTimerSet(struct pb_TimerSet * timers) { /* clean up all of the async event markers */ struct pb_async_time_marker_list ** event = &(timers->async_markers); while( *event != NULL) { struct pb_async_time_marker_list ** next = &((*event)->next); free(*event); (*event) = NULL; event = next; } int i = 0; for(i = 0; i < pb_TimerID_LAST; ++i) { if (timers->sub_timer_list[i] != NULL) { struct pb_SubTimer *subtimer = timers->sub_timer_list[i]->subtimer_list; struct pb_SubTimer *prev = NULL; while (subtimer != NULL) { free(subtimer->label); prev = subtimer; subtimer = subtimer->next; free(prev); } free(timers->sub_timer_list[i]); } } } // end utils.c // dump.h /*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ void dump_histo_img(unsigned char* histo, unsigned int height, unsigned int width, const char *filename); //end dump.h // dump.c /*************************************************************************** * * (C) Copyright 2010 The 
Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdint.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> // This function takes an HSV value and converts it to BMP. // We use this function to generate colored images with // Smooth spectrum traversal for the input and output images. RGB HSVtoRGB( float h, float s, float v ) { int i; float f, p, q, t; float r, g, b; RGB value={0,0,0}; if( s == 0 ) { r = g = b = v; return value; } h /= 60; i = floor( h ); f = h - i; p = v * ( 1 - s ); q = v * ( 1 - s * f ); t = v * ( 1 - s * ( 1 - f ) ); switch( i ) { case 0: r = v; g = t; b = p; break; case 1: r = q; g = v; b = p; break; case 2: r = p; g = v; b = t; break; case 3: r = p; g = q; b = v; break; case 4: r = t; g = p; b = v; break; default: r = v; g = p; b = q; break; } unsigned int temp = r*255; value.R = temp; temp = g*255; value.G = temp; temp = b*255; value.B = temp; return value; } void dump_histo_img(unsigned char* histo, unsigned int height, unsigned int width, const char *filename) { RGB* pixel_map = (RGB*) malloc (height*width*sizeof(RGB)); size_t y, x; for (y = 0; y < height; ++y) { for (x = 0; x < width; ++x) { unsigned char value = histo[y * width + x]; if (value == 0){ pixel_map[y*width+x].R = 0; pixel_map[y*width+x].G = 0; pixel_map[y*width+x].B = 0; } else { pixel_map[y*width+x] = HSVtoRGB(0.0,1.0,cbrt(1+ 63.0*((float)value)/((float)UINT8_MAX))/4); } } } create_bmp(pixel_map, height, width, filename); free(pixel_map); } // end dump.c // main.c #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include "common.h" typedef struct { } hist; int sequential( int argc, char* argv[], struct pb_Parameters *parameters, unsigned char **histo, unsigned int *size, double *time ) { printf("\nSequential execution:\n"); struct pb_TimerSet timers; if(!parameters->inpFiles[0]){ fputs("Input file expected\n", 
stderr); return -1; } int numIterations; if (argc >= 2){ numIterations = atoi(argv[1]); } else { fputs("Expected at least one command line argument\n", stderr); return -1; } pb_InitializeTimerSet(&timers); char *inputStr = "Input"; char *outputStr = "Output"; pb_AddSubTimer(&timers, inputStr, pb_TimerID_IO); pb_AddSubTimer(&timers, outputStr, pb_TimerID_IO); pb_SwitchToSubTimer(&timers, inputStr, pb_TimerID_IO); unsigned int img_width, img_height; unsigned int histo_width, histo_height; FILE* f = fopen(parameters->inpFiles[0],"rb"); int result = 0; result += fread(&img_width, sizeof(unsigned int), 1, f); result += fread(&img_height, sizeof(unsigned int), 1, f); result += fread(&histo_width, sizeof(unsigned int), 1, f); result += fread(&histo_height, sizeof(unsigned int), 1, f); if (result != 4){ fputs("Error reading input and output dimensions from file\n", stderr); return -1; } unsigned int histo_size = histo_width*histo_height; unsigned int img_size = img_width*img_height; (*size) = histo_size; unsigned int* img = (unsigned int*) malloc (img_size * sizeof(unsigned int)); (*histo) = (unsigned char*) calloc (histo_size, sizeof(unsigned char)); pb_SwitchToSubTimer(&timers, "Input", pb_TimerID_IO); result = fread(img, sizeof(unsigned int), img_width*img_height, f); fclose(f); if (result != img_width*img_height){ fputs("Error reading input array from file\n", stderr); return -1; } pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); int iter; for (iter = 0; iter < numIterations; iter++){ memset((*histo),0,histo_height*histo_width*sizeof(unsigned char)); unsigned int i; for (i = 0; i < img_size; ++i) { const unsigned int value = img[i]; if ((*histo)[value] < UINT8_MAX) { ++(*histo)[value]; } } } pb_SwitchToSubTimer(&timers, outputStr, pb_TimerID_IO); if (parameters->outFile) { dump_histo_img((*histo), histo_height, histo_width, parameters->outFile); } pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); free(img); pb_SwitchToTimer(&timers, pb_TimerID_NONE); printf("\n"); 
pb_PrintTimerSet(&timers, time); return 0; } int parallel( int argc, char* argv[], struct pb_Parameters *parameters, unsigned char **histo, unsigned int *size, double *time ) { struct pb_TimerSet timers; printf("\nParallel execution:\n"); if(!parameters->inpFiles[0]){ fputs("Input file expected\n", stderr); return -1; } int numIterations; if (argc >= 2){ numIterations = atoi(argv[1]); } else { fputs("Expected at least one command line argument\n", stderr); return -1; } pb_InitializeTimerSet(&timers); char *inputStr = "Input"; char *outputStr = "Output"; pb_AddSubTimer(&timers, inputStr, pb_TimerID_IO); pb_AddSubTimer(&timers, outputStr, pb_TimerID_IO); pb_SwitchToSubTimer(&timers, inputStr, pb_TimerID_IO); unsigned int img_width, img_height; unsigned int histo_width, histo_height; FILE* f = fopen(parameters->inpFiles[0],"rb"); int result = 0; result += fread(&img_width, sizeof(unsigned int), 1, f); result += fread(&img_height, sizeof(unsigned int), 1, f); result += fread(&histo_width, sizeof(unsigned int), 1, f); result += fread(&histo_height, sizeof(unsigned int), 1, f); if (result != 4){ fputs("Error reading input and output dimensions from file\n", stderr); return -1; } unsigned int histo_size = histo_height * histo_width; unsigned int img_size = img_width * img_height; *size = histo_size; unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int)); (*histo) = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char)); pb_SwitchToSubTimer(&timers, "Input", pb_TimerID_IO); result = fread(img, sizeof(unsigned int), img_width*img_height, f); fclose(f); if (result != img_width*img_height){ fputs("Error reading input array from file\n", stderr); return -1; } pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); //int iter; #pragma omp parallel { unsigned char *inner_histo = (unsigned char *) calloc(histo_width * histo_height, sizeof(unsigned char)); double num_threads = omp_get_num_threads(); int thread_id = omp_get_thread_num(); 
int chunk_size = ceil(numIterations/num_threads); int iter_start = thread_id * chunk_size; int iter_end = (thread_id + 1) * chunk_size; for (int iter = iter_start; iter < iter_end; iter++) { memset(inner_histo, 0, histo_height * histo_width * sizeof(unsigned char)); unsigned int i; for (i = 0; i < img_width * img_height; ++i) { const unsigned int value = img[i]; if (inner_histo[value] < UINT8_MAX) { ++inner_histo[value]; } } for (i = 0; i < histo_size; i++) { (*histo)[i] = inner_histo[i]; } } free(inner_histo); } pb_SwitchToSubTimer(&timers, outputStr, pb_TimerID_IO); if (parameters->outFile) { dump_histo_img((*histo), histo_height, histo_width, parameters->outFile); } pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); free(img); pb_SwitchToTimer(&timers, pb_TimerID_NONE); printf("\n"); pb_PrintTimerSet(&timers, time); return 0; } //int parallel( // int argc, // char* argv[], // struct pb_Parameters *parameters, // unsigned char **histo, // unsigned int *size, // double *time //) { // struct pb_TimerSet timers; // // printf("\nParallel execution:\n"); // // if(!parameters->inpFiles[0]){ // fputs("Input file expected\n", stderr); // return -1; // } // // int numIterations; // if (argc >= 2){ // numIterations = atoi(argv[1]); // } else { // fputs("Expected at least one command line argument\n", stderr); // return -1; // } // // pb_InitializeTimerSet(&timers); // // char *inputStr = "Input"; // char *outputStr = "Output"; // // pb_AddSubTimer(&timers, inputStr, pb_TimerID_IO); // pb_AddSubTimer(&timers, outputStr, pb_TimerID_IO); // // pb_SwitchToSubTimer(&timers, inputStr, pb_TimerID_IO); // // unsigned int img_width, img_height; // unsigned int histo_width, histo_height; // // FILE* f = fopen(parameters->inpFiles[0],"rb"); // int result = 0; // // result += fread(&img_width, sizeof(unsigned int), 1, f); // result += fread(&img_height, sizeof(unsigned int), 1, f); // result += fread(&histo_width, sizeof(unsigned int), 1, f); // result += fread(&histo_height, 
sizeof(unsigned int), 1, f); // // if (result != 4){ // fputs("Error reading input and output dimensions from file\n", stderr); // return -1; // } // // unsigned int histo_size = histo_height * histo_width; // unsigned int img_size = img_width*img_height; // (*size) = histo_size; // // unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int)); // (*histo) = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char)); // // pb_SwitchToSubTimer(&timers, "Input", pb_TimerID_IO); // // result = fread(img, sizeof(unsigned int), img_width*img_height, f); // // fclose(f); // // if (result != img_width*img_height){ // fputs("Error reading input array from file\n", stderr); // return -1; // } // // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); // // //int iter; // // for (int iter = 0; iter < numIterations; iter++) { // // memset((*histo), 0, histo_size * sizeof(unsigned char)); // // unsigned char *hista = (unsigned char*) malloc (histo_size *4 * sizeof(unsigned char)); // // //memset(hista, 0, histo_size * 4); // // int thread_id, offset, iter_start, iter_end, num_thread, chunk_size; //#pragma omp parallel \ // private (thread_id, offset, iter_start, iter_end) \ // shared (chunk_size, hista, num_thread) // { // num_thread = omp_get_num_threads(); // thread_id = omp_get_thread_num(); // offset = thread_id * histo_size; // // memset(hista + offset, 0 , histo_size); // // chunk_size = ceil(img_size/(double)num_thread); // iter_start = thread_id * chunk_size; // iter_end = (thread_id + 1) * chunk_size; // if (iter_end > img_size) iter_end = img_size; ////#pragma omp critical // //{ // for (int i = iter_start; i < iter_end; ++i) { // const unsigned int value = img[i]; // if (hista[offset + value] < UINT8_MAX) { // ++hista[offset + value]; // } // } // //} //#pragma omp barrier // } // // //#pragma omp for // for(int i=0; i<histo_size; i++) { // for(int t=0; t<num_thread; t++) { // if ((*histo)[i] < UINT8_MAX - hista[histo_size*t + 
i]) { // (*histo)[i] += hista[histo_size*t + i]; // } // else { // (*histo)[i] = UINT8_MAX; // } // } // } // // free(hista); // } // // pb_SwitchToSubTimer(&timers, outputStr, pb_TimerID_IO); // // if (parameters->outFile) { // dump_histo_img((*histo), histo_height, histo_width, parameters->outFile); // } // // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); // // free(img); // // pb_SwitchToTimer(&timers, pb_TimerID_NONE); // // printf("\n"); // pb_PrintTimerSet(&timers, time); // // return 0; //} int main(int argc, char* argv[]) { double sequential_time, parallel_time; int err; unsigned char* sequential_histo, *parallel_histo; unsigned int sequential_histo_size, parallel_histo_size; struct pb_Parameters *parameters; parameters = pb_ReadParameters(&argc, argv); if (!parameters) return -1; err = parallel(argc, argv, parameters, &parallel_histo, &parallel_histo_size, &parallel_time); if (err) { return err; } err = sequential(argc, argv, parameters, &sequential_histo, &sequential_histo_size, &sequential_time); if (err) { return err; } finish_2(sequential_histo, parallel_histo, sequential_histo_size, parallel_histo_size, sequential_time, parallel_time); pb_FreeParameters(parameters); free(sequential_histo); free(parallel_histo); } // end main.c
// ==== file: GB_unop__identity_fp32_int64.c ====
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp32_int64
// op(A') function:  GB_unop_tran__identity_fp32_int64

// C type:   float
// A type:   int64_t
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise over the anz entries of Ax, writing
// the typecast result into Cx.  When Ab is non-NULL, A is bitmap and
// only positions with Ab [p] != 0 are computed.
GrB_Info GB_unop_apply__identity_fp32_int64
(
    float *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                int64_t aij = Ax [p] ;
                float z = (float) aij ;
                Cx [p] = z ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared via the included template, which
// expands using the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_fp32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
// ==== file: GB_atomics.h ====
//------------------------------------------------------------------------------
// GB_atomics.h: definitions for atomic operations
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// All atomic operations used by SuiteSparse:GraphBLAS appear in this file.

// These atomic operations assume either an ANSI C11 compiler that supports
// OpenMP 3.1 or later, or Microsoft Visual Studio on 64-bit Windows (which
// only supports OpenMP 2.0).  SuiteSparse:GraphBLAS is not supported on 32-bit
// Windows.

#ifndef GB_ATOMICS_H
#define GB_ATOMICS_H
#include "GB.h"

//------------------------------------------------------------------------------
// determine the architecture
//------------------------------------------------------------------------------

#if __x86_64__

    // on the x86, atomic updates can be more aggresive.  The MIN, MAX, EQ,
    // XNOR, and ANY monoids are implemented with atomic compare/exchange.
    #define GB_X86_64 1

#else

    // on the ARM, Power8/9, and others, only use built-in #pragma omp atomic
    // updates.  Do not use atomic compare/exchange.
    #define GB_X86_64 0

#endif

//------------------------------------------------------------------------------
// atomic updates
//------------------------------------------------------------------------------

// Whenever possible, the OpenMP pragma is used with a clause (as introduced in
// OpenMP 3.1), as follow:
//
//      #pragma omp atomic [clause]
//
// where [clause] is read, write, update, or capture.
//
// Microsoft Visual Studio only supports OpenMP 2.0, which does not have the
// [clause].  Without the [clause], #pragma omp atomic is like
// #pragma omp atomic update, but the expression can only be one of:
//
//      x binop= expression
//      x++
//      ++x
//      x--
//      --x
//
// where binop is one of these operators:    + * - / & ^ | << >>
//
// OpenMP 3.0 and later support additional options for the "update" clause,
// but SuiteSparse:GraphBLAS uses only this form:
//
//      x binop= expression
//
// where binop is:  + * & ^ |
//
// This atomic update is used for the PLUS, TIMES, LAND, LXOR, and LOR monoids,
// when applied to the built-in types.  For PLUS and TIMES, these are the 10
// types INTx, UINTx, FP32, FP64 (for x = 8, 16, 32, and 64).  For the boolean
// monoids, only the BOOL type is used.
//
// As a result, the atomic updates are the same for gcc and icc (which support
// OpenMP 3.0 or later) with the "update" clause.  For MS Visual Studio, the
// "update" clause is removed since it supports OpenMP 2.0.

// GB_ATOMIC_UPDATE: selected by the _OPENMP version macro at compile time.
#if ( _OPENMP >= 201307 )

    // OpenMP 4.0 or later
    #define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic update seq_cst)

#elif ( _OPENMP >= 201107 )

    // OpenMP 3.1
    #define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic update)

#elif ( _OPENMP >= 199810 )

    // OpenMP 1.0 to 3.0: no optional clauses, "update" is assumed
    #define GB_ATOMIC_UPDATE GB_PRAGMA (omp atomic)

#else

    // no OpenMP at all
    #define GB_ATOMIC_UPDATE

#endif

//------------------------------------------------------------------------------
// atomic read and write
//------------------------------------------------------------------------------

// In Microsoft Visual Studio, simple reads and writes to properly aligned
// 64-bit values are already atomic on 64-bit Windows for any architecture
// supported by Windows (any Intel or ARM architecture).  See:
// https://docs.microsoft.com/en-us/windows/win32/sync/interlocked-variable-access
// SuiteSparse:GraphBLAS is not supported on 32-bit Windows.  Thus, there
// is no need for atomic reads/writes when compiling GraphBLAS on Windows
// with MS Visual Studio.

// ARM, Power8/9, and others need the explicit atomic read/write.
// x86: no atomic read/write is needed.

#if GB_X86_64

    // x86: no atomic read/write is needed.
    #define GB_ATOMIC_READ
    #define GB_ATOMIC_WRITE

#elif ( _OPENMP >= 201811 )

    // OpenMP 5.0 or later
    #define GB_ATOMIC_READ  GB_PRAGMA (omp atomic read acquire)
    #define GB_ATOMIC_WRITE GB_PRAGMA (omp atomic write release)

#elif ( _OPENMP >= 201307 )

    // OpenMP 4.0 and 4.5
    #define GB_ATOMIC_READ  GB_PRAGMA (omp atomic read seq_cst)
    #define GB_ATOMIC_WRITE GB_PRAGMA (omp atomic write seq_cst)

#elif ( _OPENMP >= 201107 )

    // OpenMP 3.1
    #define GB_ATOMIC_READ  GB_PRAGMA (omp atomic read)
    #define GB_ATOMIC_WRITE GB_PRAGMA (omp atomic write)

#else

    // OpenMP 3.0 or earlier, or no OpenMP at all
    #define GB_ATOMIC_READ
    #define GB_ATOMIC_WRITE

#endif

//------------------------------------------------------------------------------
// flush
//------------------------------------------------------------------------------

#if defined ( _OPENMP )

    // All versions of OpenMP have the #pragma omp flush
    #define GB_OMP_FLUSH GB_PRAGMA (omp flush)

#else

    // no OpenMP at all
    #define GB_OMP_FLUSH

#endif

//------------------------------------------------------------------------------
// atomic capture
//------------------------------------------------------------------------------

// An atomic capture loads the prior value of the target into a thread-local
// result, and then overwrites the target with the new value.  The target is a
// value that is shared between threads.  The value and result arguments are
// thread-local.
SuiteSparse:GraphBLAS uses four atomic capture methods, // defined below, of the form: // // { result = target ; target = value ; } for int64_t and int8_t // { result = target ; target |= value ; } for int64_t // { result = target++ ; } for int64_t // // OpenMP 3.1 and later supports atomic captures with a "capture" clause: // // #pragma omp atomic capture // { result = target ; target = value ; } // // or with a binary operator // // #pragma omp atomic capture // { result = target ; target binop= value ; } // // MS Visual Studio supports only OpenMP 2.0, and does not support any // "capture" clause. Thus, on Windows, _InterlockedExchange* and // _InterlockedOr* functions are used instead, as described here: // // https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedexchange-intrinsic-functions // https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions #if ( _OPENMP >= 201307 ) // OpenMP 4.0 or later #define GB_ATOMIC_CAPTURE GB_PRAGMA (omp atomic capture seq_cst) #elif ( _OPENMP >= 201107 ) // OpenMP 3.1 #define GB_ATOMIC_CAPTURE GB_PRAGMA (omp atomic capture) #elif ( _OPENMP >= 199810 ) // OpenMP 1.0 to 3.0: generate an intentional compile-time error if any // attempt is made to use the atomic capture. 
#define GB_ATOMIC_CAPTURE atomic capture not available #else // no OpenMP at all #define GB_ATOMIC_CAPTURE #endif //-------------------------------------------------------------------------- // atomic capture for int64_t //-------------------------------------------------------------------------- // int64_t result, target, value ; // do this atomically: { result = target ; target = value ; } #if GB_MICROSOFT #define GB_ATOMIC_CAPTURE_INT64(result, target, value) \ { \ result = _InterlockedExchange64 \ ((int64_t volatile *) (&(target)), value) ; \ } #else #define GB_ATOMIC_CAPTURE_INT64(result, target, value) \ { \ GB_ATOMIC_CAPTURE \ { \ result = target ; \ target = value ; \ } \ } #endif //-------------------------------------------------------------------------- // atomic capture for int8_t //-------------------------------------------------------------------------- // int8_t result, target, value ; // do this atomically: { result = target ; target = value ; } #if GB_MICROSOFT #define GB_ATOMIC_CAPTURE_INT8(result, target, value) \ { \ result = _InterlockedExchange8 \ ((char volatile *) &(target), value) ; \ } #else #define GB_ATOMIC_CAPTURE_INT8(result, target, value) \ { \ GB_ATOMIC_CAPTURE \ { \ result = target ; \ target = value ; \ } \ } #endif //-------------------------------------------------------------------------- // atomic capture with bitwise OR, for int64_t //-------------------------------------------------------------------------- // int64_t result, target, value ; // do this atomically: { result = target ; target |= value ; } #if GB_MICROSOFT #define GB_ATOMIC_CAPTURE_INT64_OR(result, target, value) \ { \ result = _InterlockedOr64 \ ((int64_t volatile *) (&(target)), value) ; \ } #else #define GB_ATOMIC_CAPTURE_INT64_OR(result, target, value) \ { \ GB_ATOMIC_CAPTURE \ { \ result = target ; \ target |= value ; \ } \ } #endif //-------------------------------------------------------------------------- // atomic post-increment 
//-------------------------------------------------------------------------- // Increment an int64_t value and return the value prior to being // incremented: // // int64_t result = target++ ; // // See // https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedincrement-intrinsic-functions?view=msvc-160 // The MS Visual Studio version computes result = ++target, so result must // be decremented by one. #if GB_MICROSOFT #define GB_ATOMIC_CAPTURE_INC64(result,target) \ { \ result = _InterlockedIncrement64 \ ((int64_t volatile *) (&(target))) - 1 ; \ } #else #define GB_ATOMIC_CAPTURE_INC64(result,target) \ { \ GB_ATOMIC_CAPTURE \ result = (target)++ ; \ } #endif //------------------------------------------------------------------------------ // atomic compare-and-exchange //------------------------------------------------------------------------------ // Atomic compare-and-exchange is used to implement the MAX, MIN and EQ // monoids, for the fine-grain saxpy-style matrix multiplication. Ideally, // OpenMP would be used for these atomic operation, but they are not supported. // So compiler-specific functions are used instead. // In gcc, icc, and clang, the atomic compare-and-exchange function // __atomic_compare_exchange computes the following, as a single atomic // operation, where type_t is any 8, 16, 32, or 64 bit scalar type. In // SuiteSparse:GraphBLAS, type_t can be bool, int8_t, uint8_t, int16_t, // uint16_t, int32_t, uint32_t, int64_t, uint64_t, float, or double. 
// // bool __atomic_compare_exchange // ( // type_t *target, // input/output // type_t *expected, // input/output // type_t *desired, // input only, even though it is a pointer // bool weak, // true, for SuiteSparse:GraphBLAS // int success_memorder, // __ATOMIC_SEQ_CST for SuiteSparse:GrB // int failure_memorder // __ATOMIC_SEQ_CST for SuiteSparse:GrB // ) // { // bool result ; // if (*target == *expected) // { // *target = *desired ; // result = true ; // } // else // { // *expected = *target ; // result = false ; // } // return (result) ; // } // // The generic __atomic_compare_exchange function in gcc (also supported by // icc) computes the above for any of these 8, 16, 32, or 64-bit scalar types // needed in SuiteSparse:GraphBLAS. SuiteSparse:GraphBLAS does not require the // 'expected = target' assignment if the result is false. It ignores the // value of 'expected' after the operation completes. The target, expected, // and desired parameters are all provided as pointers: // // See https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html // Microsoft Visual Studio provides similar but not identical functionality in // the _InterlockedCompareExchange functions, but they are named differently // for different types. Only int8_t, int16_t, int32_t, and int64_t types are // supported. For the int64_t case, the following is performed atomically: // // int64_t _InterlockedCompareExchange64 // ( // int64_t volatile *target, // input/output // int64_t desired // input only // int64_t expected // ) // { // int64_t result = *target ; // if (*target == expected) // { // target = desired ; // } // return (result) ; // } // // It does not assign "expected = target" if the test is false, but // SuiteSparse:GraphBLAS does not require this action. It does not return a // boolean result, but instead returns the original value of (*target). // However, this can be compared with the expected value to obtain the // same boolean result as __atomic_compare_exchange. 
// // Type punning is used to extend these signed integer types to unsigned // integers of the same number of bytes, and to float and double. #if GB_MICROSOFT //-------------------------------------------------------------------------- // GB_PUN: type punning //-------------------------------------------------------------------------- // With type punning, a value is treated as a different type, but with no // typecasting. The address of the variable is first typecasted to a (type // *) pointer, and then the pointer is dereferenced. Type punning is only // needed to extend the atomic compare/exchange functions for Microsoft // Visual Studio. #define GB_PUN(type,value) (*((type *) (&(value)))) //-------------------------------------------------------------------------- // compare/exchange for MS Visual Studio //-------------------------------------------------------------------------- // bool, int8_t, and uint8_t #define GB_ATOMIC_COMPARE_EXCHANGE_8(target, expected, desired) \ ( \ GB_PUN (int8_t, expected) == \ _InterlockedCompareExchange8 ((int8_t volatile *) (target), \ GB_PUN (int8_t, desired), GB_PUN (int8_t, expected)) \ ) // int16_t and uint16_t #define GB_ATOMIC_COMPARE_EXCHANGE_16(target, expected, desired) \ ( \ GB_PUN (int16_t, expected) == \ _InterlockedCompareExchange16 ((int16_t volatile *) (target), \ GB_PUN (int16_t, desired), GB_PUN (int16_t, expected)) \ ) // float, int32_t, and uint32_t #define GB_ATOMIC_COMPARE_EXCHANGE_32(target, expected, desired) \ ( \ GB_PUN (int32_t, expected) == \ _InterlockedCompareExchange ((int32_t volatile *) (target), \ GB_PUN (int32_t, desired), GB_PUN (int32_t, expected)) \ ) // double, int64_t, and uint64_t #define GB_ATOMIC_COMPARE_EXCHANGE_64(target, expected, desired) \ ( \ GB_PUN (int64_t, expected) == \ _InterlockedCompareExchange64 ((int64_t volatile *) (target), \ GB_PUN (int64_t, desired), GB_PUN (int64_t, expected)) \ ) #else //-------------------------------------------------------------------------- // 
compare/exchange for gcc, icc, and clang on x86 and Power8/9 //-------------------------------------------------------------------------- // the compare/exchange function is generic for any type #define GB_ATOMIC_COMPARE_EXCHANGE_X(target, expected, desired) \ __atomic_compare_exchange (target, &expected, &desired, \ true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) \ // bool, int8_t, and uint8_t #define GB_ATOMIC_COMPARE_EXCHANGE_8(target, expected, desired) \ GB_ATOMIC_COMPARE_EXCHANGE_X(target, expected, desired) // int16_t and uint16_t #define GB_ATOMIC_COMPARE_EXCHANGE_16(target, expected, desired) \ GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired) // float, int32_t, and uint32_t #define GB_ATOMIC_COMPARE_EXCHANGE_32(target, expected, desired) \ GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired) // double, int64_t, and uint64_t #define GB_ATOMIC_COMPARE_EXCHANGE_64(target, expected, desired) \ GB_ATOMIC_COMPARE_EXCHANGE_X (target, expected, desired) #endif #endif
parallel-inl.h
// Copyright (c) 2018 Doyub Kim // // I am making my contributions/submissions to this project solely in my // personal capacity and am not conveying any rights to any intellectual // property of any third parties. #ifndef INCLUDE_JET_DETAIL_PARALLEL_INL_H_ #define INCLUDE_JET_DETAIL_PARALLEL_INL_H_ #include "array_utils-inl.h" #include "array_utils.h" #include "constants.h" #include "macros.h" #include <algorithm> #include <functional> #include <future> #include <vector> #ifdef JET_TASKING_TBB #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/parallel_sort.h> #include <tbb/task.h> #elif defined(JET_TASKING_CPP11THREADS) #include <thread> #endif namespace vox { namespace geometry { namespace internal { // NOTE - This abstraction takes a lambda which should take captured // variables by *value* to ensure no captured references race // with the task itself. template <typename TASK_T> inline void schedule(TASK_T &&fcn) { #ifdef JET_TASKING_TBB struct LocalTBBTask : public tbb::task { TASK_T func; tbb::task *execute() override { func(); return nullptr; } LocalTBBTask(TASK_T &&f) : func(std::forward<TASK_T>(f)) {} }; auto *tbb_node = new (tbb::task::allocate_root()) LocalTBBTask(std::forward<TASK_T>(fcn)); tbb::task::enqueue(*tbb_node); #elif defined(JET_TASKING_CPP11THREADS) std::thread thread(fcn); thread.detach(); #else // OpenMP or Serial --> synchronous! fcn(); #endif } template <typename TASK_T> using operator_return_t = typename std::result_of<TASK_T()>::type; // NOTE - see above, same issues associated with schedule() template <typename TASK_T> inline auto async(TASK_T &&fcn) -> std::future<operator_return_t<TASK_T>> { using package_t = std::packaged_task<operator_return_t<TASK_T>()>; auto task = new package_t(std::forward<TASK_T>(fcn)); auto future = task->get_future(); schedule([=]() { (*task)(); delete task; }); return future; } // Adopted from: // Radenski, A. 
// Shared Memory, Message Passing, and Hybrid Merge Sorts for Standalone and // Clustered SMPs. Proc PDPTA'11, the 2011 International Conference on Parallel // and Distributed Processing Techniques and Applications, CSREA Press // (H. Arabnia, Ed.), 2011, pp. 367 - 373. template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void merge(RandomIterator a, size_t size, RandomIterator2 temp, CompareFunction compareFunction) { size_t i1 = 0; size_t i2 = size / 2; size_t tempi = 0; while (i1 < size / 2 && i2 < size) { if (compareFunction(a[i1], a[i2])) { temp[tempi] = a[i1]; i1++; } else { temp[tempi] = a[i2]; i2++; } tempi++; } while (i1 < size / 2) { temp[tempi] = a[i1]; i1++; tempi++; } while (i2 < size) { temp[tempi] = a[i2]; i2++; tempi++; } // Copy sorted temp array into main array, a parallelFor(kZeroSize, size, [&](size_t i) { a[i] = temp[i]; }); } template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void parallelMergeSort(RandomIterator a, size_t size, RandomIterator2 temp, unsigned int numThreads, CompareFunction compareFunction) { if (numThreads == 1) { std::sort(a, a + size, compareFunction); } else if (numThreads > 1) { std::vector<std::future<void>> pool; pool.reserve(2); auto launchRange = [compareFunction](RandomIterator begin, size_t k2, RandomIterator2 temp, unsigned int numThreads) { parallelMergeSort(begin, k2, temp, numThreads, compareFunction); }; pool.emplace_back(internal::async([=]() { launchRange(a, size / 2, temp, numThreads / 2); })); pool.emplace_back(internal::async( [=]() { launchRange(a + size / 2, size - size / 2, temp + size / 2, numThreads - numThreads / 2); })); // Wait for jobs to finish for (auto &f : pool) { if (f.valid()) { f.wait(); } } merge(a, size, temp, compareFunction); } } } // namespace internal template <typename RandomIterator, typename T> void parallelFill(const RandomIterator &begin, const RandomIterator &end, const T &value, ExecutionPolicy policy) { auto 
diff = end - begin; if (diff <= 0) { return; } size_t size = static_cast<size_t>(diff); parallelFor( kZeroSize, size, [begin, value](size_t i) { begin[i] = value; }, policy); } // Adopted from http://ideone.com/Z7zldb template <typename IndexType, typename Function> void parallelFor(IndexType start, IndexType end, const Function &func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_for(start, end, func); } else { for (auto i = start; i < end; ++i) { func(i); } } #elif JET_TASKING_CPP11THREADS // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // [Helper] Inner loop auto launchRange = [&func](IndexType k1, IndexType k2) { for (IndexType k = k1; k < k2; k++) { func(k); } }; // Create pool and launch jobs std::vector<std::thread> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(launchRange, i1, i2); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(launchRange, i1, end); } // Wait for jobs to finish for (std::thread &t : pool) { if (t.joinable()) { t.join(); } } #else #ifdef JET_TASKING_OPENMP if (policy == ExecutionPolicy::kParallel) { #pragma omp parallel for #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) for (ssize_t i = start; i < ssize_t(end); ++i) { #else // !MSVC || Intel for (auto i = start; i < end; ++i) { #endif // MSVC && !Intel func(i); } } else { for (auto i = start; i < end; ++i) { func(i); } } #else // JET_TASKING_OPENMP for (auto i = start; i < end; ++i) 
{ func(i); } #endif // JET_TASKING_OPENMP #endif } template <typename IndexType, typename Function> void parallelRangeFor(IndexType start, IndexType end, const Function &func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_for(tbb::blocked_range<IndexType>(start, end), [&func](const tbb::blocked_range<IndexType> &range) { func(range.begin(), range.end()); }); } else { func(start, end); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(internal::async([=]() { func(i1, i2); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(internal::async([=]() { func(i1, end); })); } // Wait for jobs to finish for (auto &f : pool) { if (f.valid()) { f.wait(); } } #endif } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function &function, ExecutionPolicy policy) { parallelFor( beginIndexY, endIndexY, [&](IndexType j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j); } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function &function, ExecutionPolicy policy) { parallelRangeFor( beginIndexY, endIndexY, 
[&](IndexType jBegin, IndexType jEnd) { function(beginIndexX, endIndexX, jBegin, jEnd); }, policy); } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function &function, ExecutionPolicy policy) { parallelFor( beginIndexZ, endIndexZ, [&](IndexType k) { for (IndexType j = beginIndexY; j < endIndexY; ++j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j, k); } } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function &function, ExecutionPolicy policy) { parallelRangeFor( beginIndexZ, endIndexZ, [&](IndexType kBegin, IndexType kEnd) { function(beginIndexX, endIndexX, beginIndexY, endIndexY, kBegin, kEnd); }, policy); } template <typename IndexType, typename Value, typename Function, typename Reduce> Value parallelReduce(IndexType start, IndexType end, const Value &identity, const Function &func, const Reduce &reduce, ExecutionPolicy policy) { if (start > end) { return identity; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { return tbb::parallel_reduce( tbb::blocked_range<IndexType>(start, end), identity, [&func](const tbb::blocked_range<IndexType> &range, const Value &init) { return func(range.begin(), range.end(), init); }, reduce); } else { (void)reduce; return func(start, end, identity); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Results std::vector<Value> results(numThreads, identity); // [Helper] Inner loop auto launchRange = [&](IndexType k1, IndexType k2, unsigned int tid) { results[tid] = func(k1, k2, identity); }; // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); unsigned int tid = 0; for (; tid + 1 < numThreads && i1 < end; ++tid) { pool.emplace_back(internal::async([=]() { launchRange(i1, i2, tid); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(internal::async([=]() { launchRange(i1, end, tid); })); } // Wait for jobs to finish for (auto &f : pool) { if (f.valid()) { f.wait(); } } // Gather Value finalResult = identity; for (const Value &val : results) { finalResult = reduce(val, finalResult); } return finalResult; #endif } template <typename RandomIterator, typename CompareFunction> void parallelSort(RandomIterator begin, RandomIterator end, CompareFunction compareFunction, ExecutionPolicy policy) { if (end < begin) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_sort(begin, end, compareFunction); } else { std::sort(begin, end, compareFunction); } #else size_t size = static_cast<size_t>(end - begin); using value_type = typename std::iterator_traits<RandomIterator>::value_type; std::vector<value_type> temp(size); // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; internal::parallelMergeSort(begin, size, temp.begin(), numThreads, compareFunction); #endif } template <typename RandomIterator> void parallelSort(RandomIterator begin, RandomIterator end, ExecutionPolicy policy) { parallelSort(begin, end, std::less<typename std::iterator_traits<RandomIterator>::value_type>(), policy); } } // namespace vox } // namespace geometry #endif // INCLUDE_JET_DETAIL_PARALLEL_INL_H_
IndexedFaceMesh.h
#ifndef __INDEXEDFACEMESH_H__
#define __INDEXEDFACEMESH_H__

#include <vector>
#include <array>
#include "Common/Common.h"
#include <iterator>

namespace Utilities {
// Triangle mesh stored as an indexed face set: vertex indices per face plus
// derived adjacency (edges, vertex->face, vertex->edge), UVs, and normals.
// Vertex positions live outside this class and are supplied to the
// updateNormals/updateVertexNormals templates via a PositionData object.
// Vector3r/Vector2r/Real come from Common/Common.h — presumably Eigen-based;
// confirm there.
class IndexedFaceMesh {
public:
    // An edge together with its (up to two) adjacent faces and two endpoints.
    struct Edge {
        std::array<unsigned int, 2> m_face;   // adjacent face indices
        std::array<unsigned int, 2> m_vert;   // endpoint vertex indices
    };

public:
    typedef std::vector<unsigned int> Faces;
    typedef std::vector<Vector3r> FaceNormals;
    typedef std::vector<Vector3r> VertexNormals;
    typedef std::vector<std::vector<unsigned int>> FacesEdges;
    typedef std::vector<Edge> Edges;
    typedef std::vector<std::vector<unsigned int>> VerticesEdges;
    typedef std::vector<std::vector<unsigned int>> VerticesFaces;
    typedef std::vector<unsigned int> UVIndices;
    typedef std::vector<Vector2r> UVs;

protected:
    unsigned int m_numPoints;            // number of vertices referenced by the mesh
    Faces m_indices;                     // flat list: m_verticesPerFace indices per face
    Edges m_edges;
    FacesEdges m_facesEdges;             // per face: indices into m_edges
    bool m_closed;
    UVIndices m_uvIndices;
    UVs m_uvs;
    VerticesFaces m_verticesFaces;       // per vertex: incident face indices
    VerticesEdges m_verticesEdges;       // per vertex: incident edge indices
    const unsigned int m_verticesPerFace = 3u;  // triangles only
    FaceNormals m_normals;               // one normal per face
    VertexNormals m_vertexNormals;       // one (averaged) normal per vertex
    bool m_flatShading;

public:
    IndexedFaceMesh();
    IndexedFaceMesh(IndexedFaceMesh const& other);
    IndexedFaceMesh& operator=(IndexedFaceMesh const& other);
    ~IndexedFaceMesh();

    void release();
    bool isClosed() const;
    bool getFlatShading() const { return m_flatShading; }
    void setFlatShading(const bool v) { m_flatShading = v; }
    // Reserves storage for the given element counts (does not create elements).
    void initMesh(const unsigned int nPoints, const unsigned int nEdges, const unsigned int nFaces);
    void addFace(const unsigned int * const indices);
    void addFace(const int * const indices);
    void addUV(const Real u, const Real v);
    void addUVIndex(const unsigned int index);

    const Faces& getFaces() const { return m_indices; }
    Faces& getFaces(){ return m_indices; }
    const FaceNormals& getFaceNormals() const { return m_normals; }
    FaceNormals& getFaceNormals(){ return m_normals; }
    const VertexNormals& getVertexNormals() const { return m_vertexNormals; }
    VertexNormals& getVertexNormals(){ return m_vertexNormals; }
    Edges& getEdges() { return m_edges; }
    const Edges& getEdges() const { return m_edges; }
    const FacesEdges& getFacesEdges() const { return m_facesEdges; }
    const UVIndices& getUVIndices() const { return m_uvIndices; }
    const UVs& getUVs() const { return m_uvs; }
    const VerticesFaces& getVertexFaces() const { return m_verticesFaces; }
    const VerticesEdges& getVertexEdges() const { return m_verticesEdges; }

    unsigned int numVertices() const { return m_numPoints; }
    unsigned int numFaces() const { return (unsigned int)m_indices.size() / m_verticesPerFace; }
    unsigned int numEdges() const { return (unsigned int)m_edges.size(); }
    unsigned int numUVs() const { return (unsigned int)m_uvs.size(); }

    void copyUVs(const UVIndices& uvIndices, const UVs& uvs);

    // Rebuilds edge and vertex adjacency from m_indices.
    void buildNeighbors();

    template<class PositionData>
    void updateNormals(const PositionData &pd, const unsigned int offset);

    template<class PositionData>
    void updateVertexNormals(const PositionData &pd);

    unsigned int getVerticesPerFace() const;
};

// Recomputes all per-face normals from the positions in pd, in parallel.
// pd must provide getPosition(index); offset is added to every stored vertex
// index before the lookup (e.g. when this mesh's vertices are a sub-range of
// a larger particle array — assumed; confirm against callers).
template<class PositionData>
void IndexedFaceMesh::updateNormals(const PositionData &pd, const unsigned int offset)
{
    m_normals.resize(numFaces());

    #pragma omp parallel default(shared)
    {
        #pragma omp for schedule(static)
        for (int i = 0; i < (int) numFaces(); i++)
        {
            // Get first three points of face
            const Vector3r &a = pd.getPosition(m_indices[m_verticesPerFace*i] + offset);
            const Vector3r &b = pd.getPosition(m_indices[m_verticesPerFace*i + 1] + offset);
            const Vector3r &c = pd.getPosition(m_indices[m_verticesPerFace*i + 2] + offset);

            // Create normal (cross product of two edge vectors; length is
            // discarded by the normalize below)
            Vector3r v1 = b - a;
            Vector3r v2 = c - a;

            m_normals[i] = v1.cross(v2);
            m_normals[i].normalize();
            // fix normals of degenerate triangles that can become zero vectors
            if (m_normals[i].squaredNorm() < 1e-6f)
                m_normals[i] = Vector3r::UnitX();
        }
    }
}

// Recomputes per-vertex normals as the normalized sum of the adjacent face
// normals (area-unweighted, since face normals are unit length).
// Requires updateNormals() to have been called first (reads m_normals).
template<class PositionData>
void IndexedFaceMesh::updateVertexNormals(const PositionData &pd)
{
    m_vertexNormals.resize(numVertices());

    for (unsigned int i = 0; i < numVertices(); i++)
    {
        m_vertexNormals[i].setZero();
    }

    for (unsigned int i = 0u; i < numFaces(); i++)
    {
        const Vector3r &n = m_normals[i];
        m_vertexNormals[m_indices[m_verticesPerFace*i]] += n;
        m_vertexNormals[m_indices[m_verticesPerFace*i + 1]] += n;
        m_vertexNormals[m_indices[m_verticesPerFace*i + 2]] += n;
    }

    for (unsigned int i = 0; i < numVertices(); i++)
    {
        m_vertexNormals[i].normalize();
    }
}

}

#endif
hello.c
#include <stdio.h>
#include <omp.h>

/* Prints one greeting per OpenMP thread in the default parallel team. */
int main() {
#pragma omp parallel
    {
        printf("hello world %d\n", omp_get_thread_num());
    }
    return 0;
}
comm.h
/**
 * Copyright (c) 2015 by Contributors
 */
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include "mxnet/ndarray.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication
 */
class Comm {
 public:
  Comm() {
#if MXNET_USE_CUDA
    // use pinned (page-locked) CPU memory only when at least one GPU exists,
    // so device<->host copies can be faster
    int gpu_num;
    int ret = cudaGetDeviceCount(&gpu_num);
    pinned_ctx_ = (ret == 0 && gpu_num > 0) ? Context::CPUPinned(0) : Context::CPU();
#else
    pinned_ctx_ = Context::CPU();
#endif
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape
   */
  virtual void Init(int key, const TShape &shape) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;

  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }

 protected:
  Context pinned_ctx_;  // context used for intermediate host-side buffers
};

/**
 * \brief an implementation of Comm that first copies data to CPU memory, and
 * then reduces there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    // tunables: thread count for the reduction, and the element count above
    // which the reduction is parallelized (see ReduceSumCPU below)
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
  }
  virtual ~CommCPU() { }

  void Init(int key, const TShape &shape) override {
    merge_buf_[key].merged = NDArray(shape, pinned_ctx_);
  }

  // Sums src into the per-key CPU merge buffer and returns it.  The actual
  // summation runs asynchronously on the engine; const_vars make it depend on
  // the completion of the copies from each device.
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    std::vector<Engine::VarHandle> const_vars(src.size() - 1);
    std::vector<NDArray> reduce(src.size());
    auto& buf = merge_buf_[key];
    CopyFromTo(src[0], &buf.merged, priority);
    reduce[0] = buf.merged;

    if (buf.copy_buf.empty()) {
      // lazily allocate one pinned staging buffer per non-first source
      buf.copy_buf.resize(src.size()-1);
      for (size_t j = 0; j < src.size() - 1; ++j) {
        buf.copy_buf[j] = NDArray(src[0].shape(), pinned_ctx_);
      }
    }
    for (size_t i = 1; i < src.size(); ++i) {
      CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
      reduce[i] = buf.copy_buf[i-1];
      const_vars[i-1] = reduce[i].var();
    }

    Engine::Get()->PushSync([reduce, this](RunContext rctx) {
        ReduceSumCPU(reduce);
      }, Context::CPU(), const_vars, {reduce[0].var()},
      FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));

    return buf.merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // first copy data to cpu, then broadcast
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) CopyFromTo(buf.merged, d, priority);
    }
  }

 private:
  // Accumulates dptr[1..] into dptr[0] over [offset, offset+size).
  // Sources are processed four at a time (with a switch on the remainder) to
  // batch the mshadow expression evaluations.
  inline static void ReduceSumCPU(
      const std::vector<real_t*> &dptr, size_t offset, index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1> in_0(dptr[0] + offset, Shape1(size));
    for (size_t i = 1; i < dptr.size(); i+=4) {
      switch (dptr.size() - i) {
        case 1: {
          Tensor<cpu, 1> in_1(dptr[i] + offset, Shape1(size));
          in_0 += in_1;
          break;
        }
        case 2: {
          Tensor<cpu, 1> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1> in_2(dptr[i+1] + offset, Shape1(size));
          in_0 += in_1 + in_2;
          break;
        }
        case 3: {
          Tensor<cpu, 1> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1> in_3(dptr[i+2] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3;
          break;
        }
        default: {
          Tensor<cpu, 1> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1> in_3(dptr[i+2] + offset, Shape1(size));
          Tensor<cpu, 1> in_4(dptr[i+3] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3 + in_4;
          break;
        }
      }
    }
  }

  // reduce sum into val[0]; splits the arrays into fixed-size chunks and
  // parallelizes over chunks with OpenMP when the data is large enough
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    // get raw data pointers
    std::vector<real_t*> dptr(in_data.size());
    for (size_t i = 0; i < in_data.size(); ++i) {
      TBlob data = in_data[i].data();
      CHECK(data.CheckContiguous());
      dptr[i] = data.FlatTo2D<cpu, real_t>().dptr_;
    }
    size_t total = in_data[0].shape().Size();
    long ntask = (total + step - 1) / step;  // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }

  /// \brief temporary space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  size_t bigarray_bound_;
  int nthread_reduction_;
};

/**
 * \brief an implementation of Comm that performs reduction on device
 * directly.
 *
 * It is faster if the total device-to-device bandwidths is larger than
 * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
 * memory.
*/
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;  // merge buffers are allocated lazily on the first Reduce
  }

  virtual ~CommDevice() { }

  // Record the (key, shape) pair; actual buffer placement is deferred to
  // InitMergeBuffer(), which needs the full key set to balance device memory.
  void Init(int key, const TShape &shape) override {
    sorted_key_shape_.push_back(std::make_pair(key, shape));
  }

  // Sum the per-device arrays in `src` into this key's on-device merge
  // buffer and return a reference to it. The reference stays valid until a
  // later call reuses the same buffer entry.
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    if (!inited_) {
      // first call: place merge buffers across devices and (unless disabled
      // via MXNET_ENABLE_GPU_P2P=0) enable CUDA peer-to-peer access
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    CopyFromTo(src[0], &(buf.merged), priority);
    reduce[0] = buf.merged;
    if (buf.copy_buf.empty()) {
      // TODO(mli) this results in large device memory usage for huge ndarray,
      // such as the largest fullc in VGG. consider to do segment reduce with
      // NDArray.Slice or gpu direct memory access. for the latter, we need to
      // remove some ctx check, and also it reduces 20% perf
      buf.copy_buf.resize(src.size()-1);
      for (size_t i = 0; i < src.size()-1; ++i) {
        // staging buffers live on the merge buffer's device
        buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx());
      }
    }
    // copy every other device's data next to the merge buffer, then sum
    for (size_t i = 0; i < src.size()-1; ++i) {
      CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
      reduce[i+1] = buf.copy_buf[i];
    }
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  // Copy `src` out to every array in `dst`.
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      // (merge buffers do not exist yet, so fan out from one of the dst
      // arrays instead; key % dst.size() spreads the load across devices)
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      // fan out from this key's merge buffer
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf.merged, d, priority);
      }
    }
  }

 private:
  // Try to enable CUDA peer-to-peer access between every ordered pair of
  // GPUs in `devs`; if some pairs cannot be enabled, log a warning plus a
  // per-row access matrix ('v' = enabled, '.' = not). No-op without CUDA.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          // already-enabled is not an error: Init may run more than once
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  using KeyShape = std::pair<int, TShape>;
  // try to allocate buff on device evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    // sort keys by descending element count so the greedy placement below
    // balances total memory across devices better
    std::sort(sorted_key_shape_.begin(), sorted_key_shape_.end(), [](
              const KeyShape& a, const KeyShape& b) {
      return a.second.Size() > b.second.Size();
    });

    // dev_id -> (context, total elements already placed on that device)
    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_shape_.size(); ++i) {
      int k = sorted_key_shape_[i].first;
      TShape s = sorted_key_shape_[i].second;
      auto& buf = merge_buf_[k];
      Context ctx;
      // greedily pick the device carrying the least data so far; the loop
      // always assigns ctx because min_size starts at SIZE_MAX
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      buf.merged = NDArray(s, ctx);
      ctx_info[ctx.dev_id].second += s.Size();
    }
    inited_ = true;
  }

  std::vector<KeyShape> sorted_key_shape_;
  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the gpu buffer
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  bool inited_;  // whether InitMergeBuffer has run
};
}  // namespace kvstore
}  // namespace mxnet
#endif  // MXNET_KVSTORE_COMM_H_
ordered_dependences.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>

// OMPT test: a doubly-nested ordered(2) loop with cross-iteration
// depend(sink)/depend(source) clauses. The CHECK lines below verify the
// ompt_event_dependences callbacks the runtime reports for each iteration.
int main() {
  int a[10][10];
#pragma omp parallel num_threads(2)
#pragma omp for ordered(2)
  for (int i = 0; i < 2; i++)
    for (int j = 0; j < 2; j++) {
      a[i][j] = i + j + 1;
      printf("%d, %d\n", i, j);
      // wait until iterations (i-1, j) and (i, j-1) have signalled source
#pragma omp ordered depend(sink : i - 1, j) depend(sink : i, j - 1)
      if (i > 0 && j > 0)
        a[i][j] = a[i - 1][j] + a[i][j - 1] + 1;
      printf("%d, %d\n", i, j);
      // publish iteration (i, j) so dependent iterations may proceed
#pragma omp ordered depend(source)
    }
  return 0;
}
// NOTE(review): the CHECK directives are consumed by FileCheck and pin the
// exact dependence tuples reported by the runtime — do not edit them.
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]

// CHECK: {{^}}[[MASTER:[0-9]+]]: ompt_event_loop_begin:
// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],

// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (0,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2

// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
// CHECK-SAME: ompt_dependence_type_sink)], ndeps=2

// CHECK: {{^}}[[MASTER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2

// CHECK: {{^}}[[WORKER:[0-9]+]]: ompt_event_loop_begin:
// CHECK-SAME: parallel_id={{[0-9]+}}, parent_task_id=[[ITASK:[0-9]+]],

// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(0, ompt_dependence_type_sink), (0,
// CHECK-SAME: ompt_dependence_type_sink)], ndeps=2

// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(1, ompt_dependence_type_source), (0,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2

// either can be first for last iteration
// CHECK-DAG: [[ITASK]]{{.*}}deps=[(0{{.*}}sink), (1,{{.*}}sink)]
// CHECK-DAG: [[ITASK]]{{.*}}deps=[(1{{.*}}sink), (0,{{.*}}sink)]

// CHECK: {{^}}[[WORKER]]: ompt_event_dependences: task_id=[[ITASK]],
// CHECK-SAME: deps=[(1, ompt_dependence_type_source), (1,
// CHECK-SAME: ompt_dependence_type_source)], ndeps=2
GB_binop__minus_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__minus_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_fc64) // A*D function (colscale): GB (_AxD__minus_fc64) // D*A function (rowscale): GB (_DxB__minus_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__minus_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__minus_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_fc64) // C=scalar+B GB (_bind1st__minus_fc64) // C=scalar+B' GB (_bind1st_tran__minus_fc64) // C=A+scalar GB (_bind2nd__minus_fc64) // C=A'+scalar GB (_bind2nd_tran__minus_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // A pattern? 0 // B type: GxB_FC64_t // B pattern? 
0 // BinaryOp: cij = GB_FC64_minus (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_minus (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_FC64 || GxB_NO_MINUS_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__minus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_fc64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC64_t alpha_scalar ; GxB_FC64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB 
(_AemultB_08__minus_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__minus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for 
num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_minus (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_minus (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_minus (x, aij) ; \ } GrB_Info GB (_bind1st_tran__minus_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_minus (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__minus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_nonmonotonic_nowait.c
// RUN: %libomp-compile-and-run // The test checks nonmonotonic scheduling works correctly when threads // may execute different loops concurrently. #include <stdio.h> #include <omp.h> #define N 200 #define C 20 int main() { int i, l0 = 0, l1 = 0; #pragma omp parallel num_threads(8) { #pragma omp for schedule(nonmonotonic:dynamic,C) nowait for (i = 0; i < N; ++i) { #pragma omp atomic l0++; } #pragma omp for schedule(nonmonotonic:dynamic,C) nowait for (i = 0; i < N * N; ++i) { #pragma omp atomic l1++; } } if (l0 != N || l1 != N * N) { printf("failed l0 = %d, l1 = %d, should be %d %d\n", l0, l1, N, N * N); return 1; } else { printf("passed\n"); return 0; } }
GB_binop__max_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__max_int32 // A.*B function (eWiseMult): GB_AemultB__max_int32 // A*D function (colscale): GB_AxD__max_int32 // D*A function (rowscale): GB_DxB__max_int32 // C+=B function (dense accum): GB_Cdense_accumB__max_int32 // C+=b function (dense accum): GB_Cdense_accumb__max_int32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_int32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_int32 // C=scalar+B GB_bind1st__max_int32 // C=scalar+B' GB_bind1st_tran__max_int32 // C=A+scalar GB_bind2nd__max_int32 // C=A'+scalar GB_bind2nd_tran__max_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t 
// cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IMAX (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_INT32 || GxB_NO_MAX_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__max_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__max_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__max_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__max_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__max_int32 ( GrB_Matrix C, const 
GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__max_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__max_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = 
NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__max_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__max_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar 
bind2nd
//------------------------------------------------------------------------------

// Compute Cx [p] = max (Ax [p], y) for every entry present in A (Ab is the
// optional bitmap of entry presence).  Generated template code; see note above.
GrB_Info GB_bind2nd__max_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = Ax [p] ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int32_t aij = Ax [pA] ;         \
    Cx [pC] = GB_IMAX (x, aij) ;    \
}

// C = max (x, A'): the transpose pattern logic is in GB_unop_transpose.c,
// which applies GB_CAST_OP (defined just above) to each moved entry.
GrB_Info GB_bind1st_tran__max_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int32_t aij = Ax [pA] ;         \
    Cx [pC] = GB_IMAX (aij, y) ;    \
}

// C = max (A', y): same transpose template, operator bound on the right.
GrB_Info GB_bind2nd_tran__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
constitute.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%        CCCC   OOO   N   N  SSSSS  TTTTT  IIIII  TTTTT  U   U  TTTTT  EEEEE  %
%       C      O   O  NN  N  SS       T      I      T    U   U    T    E      %
%       C      O   O  N N N   ESSS    T      I      T    U   U    T    EEE    %
%       C      O   O  N  NN     SS    T      I      T    U   U    T    E      %
%        CCCC   OOO   N   N  SSSSS    T    IIIII    T     UUU     T    EEEEE  %
%                                                                             %
%                                                                             %
%                 MagickCore Methods to Constitute an Image                   %
%                                                                             %
%                             Software Design                                 %
%                                  Cristy                                     %
%                               October 1998                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. The data can be % char, short int, int, float, or double. Float and double require the % pixels to be normalized [0..1], otherwise [0..QuantumRange]. 
For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
%     image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
%     Image *ConstituteImage(const size_t columns,const size_t rows,
%       const char *map,const StorageType storage,const void *pixels,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o columns: width in pixels of the image.
%
%   o rows: height in pixels of the image.
%
%   o map: This string reflects the expected ordering of the pixel array.
%     It can be any combination or order of R = red, G = green, B = blue,
%     A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%     Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%     P = pad.
%
%   o storage: Define the data type of the pixels.  Float and double types are
%     expected to be normalized [0..1] otherwise [0..QuantumRange].  Choose
%     from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
%     LongPixel, QuantumPixel, or ShortPixel.
%
%   o pixels: This array of values contain the pixel components as defined by
%     map and type.  You must preallocate this array where the expected
%     length varies depending on the values of width, height, map, and type.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Derive the image depth from the storage type (bits per sample).
    IntegerPixel and QuantumPixel fall through to the default depth.
  */
  switch (storage)
  {
    case CharPixel: image->depth=8*sizeof(unsigned char); break;
    case DoublePixel: image->depth=8*sizeof(double); break;
    case FloatPixel: image->depth=8*sizeof(float); break;
    case LongPixel: image->depth=8*sizeof(unsigned long); break;
    case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
    case ShortPixel: image->depth=8*sizeof(unsigned short); break;
    default: break;
  }
  /*
    Scan the channel map once to decide alpha trait and colorspace before
    the pixels are imported (a one-character map means grayscale).
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Copy the caller's raster into the image; on failure the partially
    constructed image is destroyed and NULL is returned.
  */
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImage() returns all the properties of an image or image sequence
%  except for the pixels.  It is much faster and consumes far less memory
%  than ReadImage().  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
% The format of the PingImage method is:
%
%     Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image_info: Ping the image defined by the file or filename members of
%     this structure.
%
%   o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* Stream sink that discards pixel data: claims every column was consumed so
   the coder runs to completion without storing pixels. */
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  magick_unreferenced(image);
  magick_unreferenced(pixels);
  return(columns);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *ping_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /* read through a cloned info with ping=MagickTrue so coders skip pixels */
  ping_info=CloneImageInfo(image_info);
  ping_info->ping=MagickTrue;
  image=ReadStream(ping_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      /* a ping should not count as elapsed image time */
      ResetTimer(&image->timer);
      if (ping_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  ping_info=DestroyImageInfo(ping_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e s                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImages() pings one or more images and returns them as an image list.
%
%  The format of the PingImage method is:
%
%      Image *PingImages(ImageInfo *image_info,const char *filename,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o filename: the image filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    format_filename[MagickPathExtent];

  /*
    Ping an image list from a file, expanding a scene template in the
    filename (e.g. image-%d.png[1-5]) into one ping per scene.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,format_filename,exception);
  if (LocaleCompare(format_filename,image_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      Image
        *image_list,
        *ping_image;

      ImageInfo
        *scene_info;

      ssize_t
        last_scene,
        scene;

      /*
        The filename embeds a scene format specifier: probe the scene range.
      */
      scene_info=CloneImageInfo(image_info);
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(scene_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (scene_info->number_scenes == 0)
        {
          /* no explicit scene list after all: ping it as a single file */
          scene_info=DestroyImageInfo(scene_info);
          return(PingImage(image_info,exception));
        }
      (void) CopyMagickString(format_filename,scene_info->filename,
        MagickPathExtent);
      image_list=NewImageList();
      last_scene=(ssize_t) (scene_info->scene+scene_info->number_scenes);
      for (scene=(ssize_t) scene_info->scene; scene < last_scene; scene++)
      {
        /* materialize the filename for this scene, then ping it */
        (void) InterpretImageFilename(image_info,(Image *) NULL,
          format_filename,(int) scene,scene_info->filename,exception);
        ping_image=PingImage(scene_info,exception);
        if (ping_image != (Image *) NULL)
          AppendImageToList(&image_list,ping_image);
      }
      scene_info=DestroyImageInfo(scene_info);
      return(image_list);
    }
  return(PingImage(image_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadImage() reads an image or image sequence from a file or file
handle.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadImage method is:
%
%      Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: Read the image defined by the file or filename members of
%      this structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Return MagickTrue when the security policy grants `rights` for the named
   coder; otherwise set errno, record a PolicyError, and return MagickFalse. */
static MagickBooleanType IsCoderAuthorized(const char *coder,
  const PolicyRights rights,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse)
    {
      errno=EPERM;
      (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
        "NotAuthorized","`%s'",coder);
      return(MagickFalse);
    }
  return(MagickTrue);
}

MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  const char
    *value;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  DecodeImageHandler
    *decoder;

  ExceptionInfo
    *sans_exception;

  GeometryInfo
    geometry_info;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  /* policy errors must not be swallowed: re-query into the real exception */
  if (sans_exception->severity == PolicyError)
    magick_info=GetMagickInfo(read_info->magick,exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* probe host byte order: inspect the first byte of a long 1 */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickDecoderSeekableStream(magick_info) != MagickFalse))
    {
      /* the coder needs random access: verify the blob is seekable, and if
         not, spool it to a temporary file first */
      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          /* remember to delete the spool file once reading is done */
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  decoder=GetImageDecoder(magick_info);
  if (decoder == (DecodeImageHandler *) NULL)
    {
      /* no native decoder: retry format detection unless a delegate exists */
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
          decoder=GetImageDecoder(magick_info);
        }
    }
  if (decoder != (DecodeImageHandler *) NULL)
    {
      /*
        Call appropriate image reader based on image type.
      */
      /* serialize non-thread-safe coders via the coder's semaphore */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=decoder(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      /* the placeholder image only carried the filename to the delegate */
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      if (status != MagickFalse)
        (void) SetImageInfo(read_info,0,exception);
      magick_info=GetMagickInfo(read_info->magick,exception);
      decoder=GetImageDecoder(magick_info);
      if (decoder == (DecodeImageHandler *) NULL)
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Call appropriate image reader based on image type.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception);
      image=(Image *) NULL;
      if (status != MagickFalse)
        image=(decoder)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      /* delete the spool/delegate temporary and restore the real filename */
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) &&
      (GetImageListLength(image) != 1))
    {
      Image
        *clones;

      /* honor a [1-5]-style scene selection by cloning the chosen frames */
      clones=CloneImages(image,read_info->scenes,exception);
      if (clones != (Image *) NULL)
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  /* post-process every frame: propagate metadata, resolution, options */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property,
      timestamp[MagickTimeExtent];

    const char
      *option;

    const StringInfo
      *profile;

    ssize_t
      option_type;

    static const char
      *source_date_epoch = (const char *) NULL;

    static MagickBooleanType
      epoch_initalized = MagickFalse;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if ((*magick_path == '\0') && (*next->magick == '\0'))
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    /* force lazy profile parsing so the properties below are populated */
    (void) GetImageProperty(next,"exif:*",exception);
    (void) GetImageProperty(next,"icc:*",exception);
    (void) GetImageProperty(next,"iptc:*",exception);
    (void) GetImageProperty(next,"xmp:*",exception);
    value=GetImageProperty(next,"exif:Orientation",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:Orientation",exception);
    if (value != (char *) NULL)
      {
        next->orientation=(OrientationType) StringToLong(value);
        (void) DeleteImageProperty(next,"tiff:Orientation");
        (void) DeleteImageProperty(next,"exif:Orientation");
      }
    value=GetImageProperty(next,"exif:XResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.x;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.x=geometry_info.rho/geometry_info.sigma;
        /* EXIF rationals may arrive as "num,den": treat den as thousandths */
        if (strchr(value,',') != (char *) NULL)
          next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:XResolution");
      }
    value=GetImageProperty(next,"exif:YResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.y;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.y=geometry_info.rho/geometry_info.sigma;
        if (strchr(value,',') != (char *) NULL)
          next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0;
        (void) DeleteImageProperty(next,"exif:YResolution");
      }
    value=GetImageProperty(next,"exif:ResolutionUnit",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"tiff:ResolutionUnit",exception);
    if (value != (char *) NULL)
      {
        option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse,
          value);
        if (option_type >= 0)
          next->units=(ResolutionType) option_type;
        (void) DeleteImageProperty(next,"exif:ResolutionUnit");
        (void) DeleteImageProperty(next,"tiff:ResolutionUnit");
      }
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    option=GetImageOption(read_info,"caption");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"comment");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"label");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        RectangleInfo
          geometry;

        /* honor -extract: crop when an offset is given, resize otherwise */
        SetGeometry(next,&geometry);
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /* NOTE(review): both profile lookups below discard their result — the
       icc/icm value is overwritten and the final value is never read;
       presumably these calls are kept for their parsing side effects —
       confirm upstream before removing. */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    /* SOURCE_DATE_EPOCH set => reproducible build: omit date properties */
    if (epoch_initalized == MagickFalse)
      {
        source_date_epoch=getenv("SOURCE_DATE_EPOCH");
        epoch_initalized=MagickTrue;
      }
    if (source_date_epoch == (const char *) NULL)
      {
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime,
          sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:modify",timestamp,exception);
        (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime,
          sizeof(timestamp),timestamp);
        (void) SetImageProperty(next,"date:create",timestamp,exception);
      }
    option=GetImageOption(image_info,"delay");
    if (option != (const char *) NULL)
      {
        flags=ParseGeometry(option,&geometry_info);
        if ((flags & GreaterValue) != 0)
          {
            if (next->delay > (size_t) floor(geometry_info.rho+0.5))
              next->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          if ((flags & LessValue) != 0)
            {
              /* NOTE(review): this branch sets ticks_per_second rather than
                 delay, unlike its siblings — looks suspicious; confirm
                 intended semantics against upstream before changing. */
              if (next->delay < (size_t) floor(geometry_info.rho+0.5))
                next->ticks_per_second=CastDoubleToLong(floor(
                  geometry_info.sigma+0.5));
            }
          else
            next->delay=(size_t) floor(geometry_info.rho+0.5);
        if ((flags & SigmaValue) != 0)
          next->ticks_per_second=CastDoubleToLong(floor(
            geometry_info.sigma+0.5));
      }
    option=GetImageOption(image_info,"dispose");
    if (option != (const char *) NULL)
      {
        option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse,
          option);
        if (option_type >= 0)
          next->dispose=(DisposeType) option_type;
      }
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  if (GetBlobError(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"UnableToReadImageData");
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d I m a g e s                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImage method is:
%
%     Image *ReadImages(ImageInfo *image_info,const char *filename,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image_info: the image info.
%
%   o filename: the image filename.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    read_filename[MagickPathExtent];

  Image
    *image,
    *images;

  ImageInfo
    *read_info;

  /*
    Read image list from a file.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  *read_info->magick='\0';
  (void) SetImageOption(read_info,"filename",filename);
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(read_info,(Image *) NULL,filename,
    (int) read_info->scene,read_filename,exception);
  /* if the interpreted name differs, the filename held a %d-style template */
  if (LocaleCompare(read_filename,read_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      ssize_t
        extent,
        scene;

      /*
        Images of the form image-%d.png[1-5].
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(read_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (read_info->number_scenes != 0)
        {
          (void) CopyMagickString(read_filename,read_info->filename,
            MagickPathExtent);
          images=NewImageList();
          extent=(ssize_t) (read_info->scene+read_info->number_scenes);
          scene=(ssize_t) read_info->scene;
          /* read each scene independently and append it to the list */
          for ( ; scene < (ssize_t) extent; scene++)
          {
            (void) InterpretImageFilename(image_info,(Image *) NULL,
              read_filename,(int) scene,read_info->filename,exception);
            image=ReadImage(read_info,exception);
            if (image == (Image *) NULL)
              continue;
            AppendImageToList(&images,image);
          }
          read_info=DestroyImageInfo(read_info);
          return(images);
        }
    }
  (void) CopyMagickString(read_info->filename,filename,MagickPathExtent);
  image=ReadImage(read_info,exception);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d I n l i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadInlineImage() reads a Base64-encoded inline image or image sequence.
%  The method returns a NULL if there is a memory shortage or if the image
%  cannot be read.  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
%
%  The format of the ReadInlineImage method is:
%
%      Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o content: the image encoded in Base64.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  /* the Base64 payload starts after the first ',' */
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  blob=Base64Decode(++p,&length);
  if (length == 0)
    {
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  /* the media subtype (after '/') hints the format, e.g. "data.gif" */
  for (p=content; (*p != '/') && (*p != '\0'); p++) ;
  if (*p != '\0')
    {
      char
        *q;

      ssize_t
        i;

      /*
        Extract media type.
      */
      if (LocaleNCompare(++p,"x-",2) == 0)
        p+=2;
      /* "data." is 5 chars; the copy loop below is bounded accordingly */
      (void) strcpy(read_info->filename,"data.");
      q=read_info->filename+5;
      for (i=0; (*p != ';') && (*p != '\0') && (i < (MagickPathExtent-6)); i++)
        *q++=(*p++);
      *q++='\0';
    }
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteImage() writes an image or an image sequence to a file or file handle.
%  If writing to a file is on disk, the name is defined by the filename member
%  of the image structure.  WriteImage() returns MagickFalse is there is a
%  memory shortage or if the image cannot be written.  Check the exception
%  member of image to determine the cause for any failure.
%
%  The format of the WriteImage method is:
%
%      MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { char filename[MagickPathExtent]; const char *option; const DelegateInfo *delegate_info; const MagickInfo *magick_info; EncodeImageHandler *encoder; ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType status, temporary; /* Determine image type from filename prefix or suffix (e.g. image.jpg). */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); sans_exception=AcquireExceptionInfo(); write_info=CloneImageInfo(image_info); (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) SetImageInfo(write_info,1,sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); /* Call appropriate image writer based on image type. */ magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) image->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; image->endian=(*(char *) &lsb_first) == 1 ? 
LSBEndian : MSBEndian; } } (void) SyncImageProfiles(image); DisassociateImageStream(image); option=GetImageOption(image_info,"delegate:bimodal"); if ((IsStringTrue(option) != MagickFalse) && (write_info->page == (char *) NULL) && (GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) == (Image *) NULL) && (IsTaintImage(image) == MagickFalse) ) { delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception); if ((delegate_info != (const DelegateInfo *) NULL) && (GetDelegateMode(delegate_info) == 0) && (IsPathAccessible(image->magick_filename) != MagickFalse)) { /* Process image with bi-modal delegate. */ (void) CopyMagickString(image->filename,image->magick_filename, MagickPathExtent); status=InvokeDelegate(write_info,image,image->magick, write_info->magick,exception); write_info=DestroyImageInfo(write_info); (void) CopyMagickString(image->filename,filename,MagickPathExtent); return(status); } } status=MagickFalse; temporary=MagickFalse; if ((magick_info != (const MagickInfo *) NULL) && (GetMagickEncoderSeekableStream(magick_info) != MagickFalse)) { char image_filename[MagickPathExtent]; (void) CopyMagickString(image_filename,image->filename,MagickPathExtent); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); (void) CopyMagickString(image->filename, image_filename,MagickPathExtent); if (status != MagickFalse) { if (IsBlobSeekable(image) == MagickFalse) { /* A seekable stream is required by the encoder. */ write_info->adjoin=MagickTrue; (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) AcquireUniqueFilename(image->filename); temporary=MagickTrue; } (void) CloseBlob(image); } } encoder=GetImageEncoder(magick_info); if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. 
*/ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception); if (delegate_info != (DelegateInfo *) NULL) { /* Process the image with delegate. */ *write_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(write_info,image,(char *) NULL, write_info->magick,exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); (void) CopyMagickString(image->filename,filename,MagickPathExtent); } else { sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if ((write_info->affirm == MagickFalse) && (magick_info == (const MagickInfo *) NULL)) { (void) CopyMagickString(write_info->magick,image->magick, MagickPathExtent); magick_info=GetMagickInfo(write_info->magick,exception); } encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler *) NULL) { char extension[MagickPathExtent]; GetPathComponent(image->filename,ExtensionPath,extension); if (*extension != '\0') magick_info=GetMagickInfo(extension,exception); else magick_info=GetMagickInfo(image->magick,exception); (void) CopyMagickString(image->filename,filename, MagickPathExtent); encoder=GetImageEncoder(magick_info); } if (encoder == (EncodeImageHandler *) NULL) { magick_info=GetMagickInfo(image->magick,exception); encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler 
*) NULL) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoEncodeDelegateForThisImageFormat", "`%s'",write_info->magick); } if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. */ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights, exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } } } if (temporary != MagickFalse) { /* Copy temporary image file to permanent. */ status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception); if (status != MagickFalse) { (void) RelinquishUniqueFileResource(write_info->filename); status=ImageToFile(image,write_info->filename,exception); } (void) CloseBlob(image); (void) RelinquishUniqueFileResource(image->filename); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); } if ((LocaleCompare(write_info->magick,"info") != 0) && (write_info->verbose != MagickFalse)) (void) IdentifyImage(image,stdout,MagickFalse,exception); write_info=DestroyImageInfo(write_info); if (GetBlobError(image) != MagickFalse) ThrowWriterException(FileOpenError,"UnableToWriteFile"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteImages() writes an image sequence into one or more files. While % WriteImage() can write an image sequence, it is limited to writing % the sequence into a single file using a format which supports multiple % frames. WriteImages(), however, does not have this limitation, instead it % generates multiple output files if necessary (or when requested). 
When % ImageInfo's adjoin flag is set to MagickFalse, the file name is expected % to include a printf-style formatting string for the frame number (e.g. % "image%02d.png"). % % The format of the WriteImages method is: % % MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o images: the image list. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info, Image *images,const char *filename,ExceptionInfo *exception) { #define WriteImageTag "Write/Image" ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType proceed; MagickOffsetType progress; MagickProgressMonitor progress_monitor; MagickSizeType number_images; MagickStatusType status; Image *p; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); write_info=CloneImageInfo(image_info); *write_info->magick='\0'; images=GetFirstImageInList(images); if (filename != (const char *) NULL) for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) (void) CopyMagickString(p->filename,filename,MagickPathExtent); (void) CopyMagickString(write_info->filename,images->filename, MagickPathExtent); sans_exception=AcquireExceptionInfo(); (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images), sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent); p=images; for ( ; GetNextImageInList(p) != (Image *) NULL; 
p=GetNextImageInList(p)) { Image *next; next=GetNextImageInList(p); if (next == (Image *) NULL) break; if (p->scene >= next->scene) { ssize_t i; /* Generate consistent scene numbers. */ i=(ssize_t) images->scene; for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) p->scene=(size_t) i++; break; } } /* Write images. */ status=MagickTrue; progress_monitor=(MagickProgressMonitor) NULL; progress=0; number_images=GetImageListLength(images); for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) { if (number_images != 1) progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL, p->client_data); status&=WriteImage(write_info,p,exception); if (number_images != 1) (void) SetImageProgressMonitor(p,progress_monitor,p->client_data); if (write_info->adjoin != MagickFalse) break; if (number_images != 1) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(p,WriteImageTag,progress,number_images); if (proceed == MagickFalse) break; } } write_info=DestroyImageInfo(write_info); return(status != 0 ? MagickTrue : MagickFalse); }
sum.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <R.h> #include <Rmath.h> #include <Rinternals.h> #if defined _OPENMP #include <omp.h> #endif double sumKernel( double * x, /* naip image */ double * W, /* pre computed spatial weights */ size_t i, /* current location in columns */ size_t j, /* current location in rows */ size_t dRow, size_t dCol, size_t nRow, /* number of Rows */ size_t nCol /* number of Columns */ ) { /* adjustment that must be applied for edge effects */ size_t k, l; size_t k_start; size_t k_stop; size_t l_start; size_t l_stop; double mu = 0; size_t k_local; size_t l_local; /* the starts */ if( i < dRow/2 ) { k_start = 0; } else { k_start = i - dRow/2 ; } if( j < dCol/2 ) { l_start = 0; } else { l_start = j - dCol/2 ; } /* the stops */ if( i + dRow/2 + 1 > nRow ) { k_stop = nRow; } else { k_stop = i + dRow/2 + 1; } if( j + dCol/2 + 1 > nCol ) { l_stop = nCol; } else { l_stop = j + dCol/2 + 1; } if( x[i*nCol + j] == INFINITY ) return( INFINITY); if( x[i*nCol + j] == -INFINITY ) return( -INFINITY); if( x[i*nCol + j] == NAN ) return( NAN); /* first pass variance */ for( k=k_start, k_local=k_start - i + (dRow/2); k < k_stop; k++, k_local++ ) { for( l=l_start, l_local=l_start -j + (dCol/2); l < l_stop; l++, l_local++ ) { if( x[k * nCol + l] == INFINITY ) continue; if( x[k * nCol + l] == -INFINITY ) continue; if( x[k * nCol + l] == NAN ) continue; mu += x[k * nCol + l] * W[ k_local*dCol + l_local]; } } return( mu ) ; } void rSmoothSums( double * x, /* this is the multi year naip images */ double * mu, /* this is the input/returned mu */ double * WMu, /* weight */ int * nRowPtr, int * nColPtr, int * dRowPtr, int * dColPtr ) { /* move R ints to size_t */ size_t dRow = *dRowPtr; size_t dCol = *dColPtr; size_t nRow = *nRowPtr; size_t nCol = *nColPtr; size_t i,j; int tid; #pragma omp parallel for private(j) for( i=0; i < nRow; i++) { for( j=0; j < nCol; j++) { mu[i*nCol + j] = sumKernel( x, WMu, i,j,dRow,dCol,nRow,nCol); } } #pragma omp 
barrier return; }
module_bl_mynn_mym_level2_impl.h
#ifndef __MODULE_BL_MYNN_MYM_LEVEL2_IMPL_H__ #define __MODULE_BL_MYNN_MYM_LEVEL2_IMPL_H__ // File version granularity. #ifndef MODULE_BL_MYNN_MYM_LEVEL2_IMPL_VERSION_MAJOR #define MODULE_BL_MYNN_MYM_LEVEL2_IMPL_VERSION_MAJOR 1 #endif #ifndef MODULE_BL_MYNN_MYM_LEVEL2_IMPL_VERSION_MINOR #define MODULE_BL_MYNN_MYM_LEVEL2_IMPL_VERSION_MINOR 0 #endif #ifndef MODULE_BL_MYNN_MYM_LEVEL2_IMPL_PATCH_VERSION #define MODULE_BL_MYNN_MYM_LEVEL2_IMPL_PATCH_VERSION 0 #endif #ifndef MODULE_BL_MYNN_MYM_LEVEL2_IMPL_CREATE_DATE #define MODULE_BL_MYNN_MYM_LEVEL2_IMPL_CREATE_DATE "Date: 07-11-2016 , Time: 12:37 PM GMT+2" #endif // Set this value to successful build date/time. #ifndef MODULE_BL_MYNN_MYM_LEVEL2_IMPL_BUILD_DATE #define MODULE_BL_MYNN_MYM_LEVEL2_IMPL_BUILD_DATE "" #endif #ifndef MODULE_BL_MYNN_MYM_LEVEL2_IMPL_AUTHOR #define MODULE_BL_MYNN_MYM_LEVEL2_IMPL_AUTHOR "Name: Bernard Gingold , e-mail: beniekg@gmail.com" #endif #include "module_bl_mynn_F90_iface.h" #include "PhysLib_Config.h" #include "std_headers.h" namespace wrf_phys_wrappers { namespace module_bl_mynn { template<typename R32 = float, typename I32 = int > struct Wrap_Mym_Level2 { /****************************************** Constructors and Destructor. *******************************************/ /* @Purpose: Default Constructor - explicitly default. */ Wrap_Mym_Level2() = default; /* @Purpose: 1st 'main' Constructor which purpose is to allocate and initialize scalar and array members. Array members are zero-filled. Caller must later initialize input arrays to correct physical state. 
*/ Wrap_Mym_Level2(_In_ const I32 KTS, _In_ const I32 KTE) : m_KTS{ KTS }, m_KTE{ KTE }, m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_u{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_v{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_qw{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_ql{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_vt{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_vq{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dtl{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dqw{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dtv{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_gm{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_gh{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_sm{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_sh{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) } { // Checking for memory allocation errors i.e. (malloc failures). for (int i{ 0 }; i != this->m_totArrays; ++i) { if ((&this->m_dz)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 1st Ctor: 'Wrap_Mym_Level2'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } // Zero-initialize arrays. // Using OpenMP and vectorization for // innermost stride. 
// Zero-initialization of Arrays 1D. #if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if(m_KTE >= (1 << 20)) #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_KTS; i != m_KTE; ++i) { this->m_dz[i] = 0.F; this->m_u[i] = 0.F; this->m_v[i] = 0.F; this->m_thl[i] = 0.F; this->m_qw[i] = 0.F; this->m_ql[i] = 0.F; this->m_vt[i] = 0.F; this->m_vq[i] = 0.F; this->m_dtl[i] = 0.F; this->m_dqw[i] = 0.F; this->m_dtv[i] = 0.F; this->m_gm[i] = 0.F; this->m_gh[i] = 0.F; this->m_sm[i] = 0.F; this->m_sh[i] = 0.F; } #else #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_KTS; i != m_KTE; ++i) { this->m_dz[i] = 0.F; this->m_u[i] = 0.F; this->m_v[i] = 0.F; this->m_thl[i] = 0.F; this->m_qw[i] = 0.F; this->m_ql[i] = 0.F; this->m_vt[i] = 0.F; this->m_vq[i] = 0.F; this->m_dtl[i] = 0.F; this->m_dqw[i] = 0.F; this->m_dtv[i] = 0.F; this->m_gm[i] = 0.F; this->m_gh[i] = 0.F; this->m_sm[i] = 0.F; this->m_sh[i] = 0.F; } #endif } /* @Purpose: 2nd 'main' Constructor which purpose is to allocate and initialize scalar and array members. Array output members are zero-filled. Caller must pass initialized input arrays to correct physical state. 
*/ Wrap_Mym_Level2(_In_ const I32 KTS, _In_ const I32 KTE, _In_ R32* __restrict const dz, _In_ R32* __restrict const u, _In_ R32* __restrict const v, _In_ R32* __restrict const thl, _In_ R32* __restrict const qw, _In_ R32* __restrict const ql, _In_ R32* __restrict const vt, _In_ R32* __restrict const vq) : m_KTS{ KTS }, m_KTE{ KTE }, m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_u{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_v{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_qw{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_ql{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_vt{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_vq{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dtl{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dqw{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dtv{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_gm{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_gh{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_sm{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_sh{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) } { // Check for memory alloction errors i.e. (malloc failures). 
for (int i{ 0 }; i != this->m_totArrays; ++i) { if ((&this->m_dz)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mym_Level2'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } // Check for null pointer occurrence. if (dz == NULL || u == NULL || v == NULL || thl == NULL || qw == NULL || ql == NULL || vt == NULL || vq == NULL || ) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mym_Level2'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "One or more caller's arrays contains invalid pointer!!\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } // Using OpenMP and vectorization for // innermost stride. // Copying input Arrays 1D. 
#if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if(m_KTE >= (1 << 20)) #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_KTS; i != m_KTE; ++i) { this->m_dz[i] = dz[i]; this->m_u[i] = u[i]; this->m_v[i] = v[i]; this->m_thl[i] = thl[i]; this->m_qw[i] = qw[i]; this->m_ql[i] = ql[i]; this->m_vt[i] = vt[i]; this->m_vq[i] = vq[i]; this->m_dtl[i] = 0.F; this->m_dqw[i] = 0.F; this->m_dtv[i] = 0.F; this->m_gm[i] = 0.F; this->m_gh[i] = 0.F; this->m_sm[i] = 0.F; this->m_sh[i] = 0.F; } #else #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_KTS; i != m_KTE; ++i) { this->m_dz[i] = dz[i]; this->m_u[i] = u[i]; this->m_v[i] = v[i]; this->m_thl[i] = thl[i]; this->m_qw[i] = qw[i]; this->m_ql[i] = ql[i]; this->m_vt[i] = vt[i]; this->m_vq[i] = vq[i]; this->m_dtl[i] = 0.F; this->m_dqw[i] = 0.F; this->m_dtv[i] = 0.F; this->m_gm[i] = 0.F; this->m_gh[i] = 0.F; this->m_sm[i] = 0.F; this->m_sh[i] = 0.F; } #endif } /* @Purpose: Copy Constructor implements deep copy semantics. 
*/ Wrap_Mym_Level2(_In_ const Wrap_Mym_Level2 &x) : m_KTS{ x.m_KTS }, m_KTE{ x.m_KTE }, m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_u{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_v{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_qw{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_ql{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_vt{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_vq{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dtl{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dqw{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_dtv{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_gm{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_gh{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_sm{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) }, m_sh{ reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)), align32B)) } { // Check for memory allocation error i.e. (malloc failures). 
for (int i{ 0 }; i != this->m_totArrays; ++i) { if ((&this->m_dz)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy-Ctor: 'Wrap_Mym_Level2'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_dz)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } #if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if(m_KTE >= (1 << 20)) #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_KTS; i != m_KTE; ++i) { this->m_dz[i] = x.m_dz[i]; this->m_u[i] = x.m_u[i]; this->m_v[i] = x.m_v[i]; this->m_thl[i] = x.m_thl[i]; this->m_qw[i] = x.m_qw[i]; this->m_ql[i] = x.m_ql[i]; this->m_vt[i] = x.m_vt[i]; this->m_vq[i] = x.m_vq[i]; this->m_dtl[i] = x.m_dtl[i]; this->m_dqw[i] = x.m_dqw[i]; this->m_dtv[i] = x.m_dtv[i]; this->m_gm[i] = x.m_gm[i]; this->m_gh[i] = x.m_gh[i]; this->m_sm[i] = x.m_sm[i]; this->m_sh[i] = x.m_sh[i]; } #else #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_KTS; i != m_KTE; ++i) { this->m_dz[i] = x.m_dz[i]; this->m_u[i] = x.m_u[i]; this->m_v[i] = x.m_v[i]; this->m_thl[i] = x.m_thl[i]; this->m_qw[i] = x.m_qw[i]; this->m_ql[i] = x.m_ql[i]; this->m_vt[i] = x.m_vt[i]; this->m_vq[i] = x.m_vq[i]; this->m_dtl[i] = x.m_dtl[i]; this->m_dqw[i] = x.m_dqw[i]; this->m_dtv[i] = x.m_dtv[i]; this->m_gm[i] = x.m_gm[i]; this->m_gh[i] = x.m_gh[i]; this->m_sm[i] = x.m_sm[i]; this->m_sh[i] = x.m_sh[i]; } #endif } /* @Purpose: Move Constructor implements shallow copy semantics. */ Wrap_Mym_Level2(_In_ Wrap_Mym_Level2 &&x) : m_KTS{ x.m_KTS }, m_KTE{ x.m_KTE } { // Reassign x's pointers. 
for (int i{ 0 }; i != this->m_totArrays; ++i) { (&this->m_dz)[i] = (&x.m_dz)[i]; } // Nullify x's pointers. for (int i{ 0 }; i != x.m_totArrays; ++i) { (&x.m_dz)[i] = NULL; } x.m_KTS = x.m_KTE = 0; } /* @Purpose: Class Destructor. */ ~Wrap_Mym_Level2() { for (int i{ 0 }; i != this->m_totArrays; ++i) { if ((&this->m_dz)[i]) { _mm_free((&this->m_dz)[i]); } } this->m_KTS = this->m_KTE = 0; } /* @Purpose: Copy-assign Operator implements deep copy semantics. */ Wrap_Mym_Level2 & operator=(_In_ const Wrap_Mym_Level2 &x) { if (this == &x) return (*this); this->m_KTS = x.m_KTS; this->m_KTE = x.m_KTE; constexpr int ntPtrs1D{15}; R32 *tPtrs1D[ntPtrs1D] = {}; for (int i{ 0 }; i != this->m_totArrays; ++i) { tPtrs1D[i] = reinterpret_cast<R32*>(_mm_malloc((m_KTE * sizeof(R32)),align32B)); } for (int i{ 0 }; i != this->m_totArrays; ++i) { if (tPtrs1D[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy Operator: 'Wrap_Mym_Level2'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << tPtrs1D[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } #if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if(m_KTE >= (1 << 20)) for (int idx = 0; idx != this->m_totArrays; ++idx) { #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_KTS; i != m_KTE; ++i) { tPtrs1D[idx][i] = (&x.m_dz)[idx][i]; } } // Deallocate current context of *this. for (int i{ 0 }; i != this->m_totArrays; ++i) { _mm_free((&this->m_dz)[i]); } // Copy temp pointers to *this. 
for (int i{ 0 }; i != this->m_totArrays; ++i) { (&this->m_dz)[i] = tPtrs1D[i]; } return (*this); #else for (int idx = 0; idx != this->m_totArrays; ++idx) { #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int i = m_KTS; i != m_KTE; ++i) { tPtrs1D[idx][i] = (&x.m_dz)[idx][i]; } } // Deallocate current context of *this. for (int i{ 0 }; i != this->m_totArrays; ++i) { _mm_free((&this->m_dz)[i]); } // Copy temp pointers to *this. for (int i{ 0 }; i != this->m_totArrays; ++i) { (&this->m_dz)[i] = tPtrs1D[i]; } return (*this); #endif } /* @Purpose: Move-assign Operator implements shallow copy semantics. */ Wrap_Mym_Level2 & operator=(_In_ Wrap_Mym_Level2 &&x) { if (this == &x) return (*this); this->m_KTS = x.m_KTS; this->m_KTE = x.m_KTE; // Deallocate current state. for (int i{ 0 }; i != this->m_totArrays; ++i) { _mm_free((&this->m_dz)[i]); } // Reassign x's pointers to *this's pointers. for (int i{ 0 }; i != this->m_totArrays; ++i) { (&this->m_dz)[i] = (&x.m_dz)[i]; } // Nullify x's pointers. for (int i{ 0 }; i != this->m_totArrays; ++i) { (&x.m_dz)[i] = NULL; } x.m_KTS = 0; x.m_KTE = 0; return (*this); } /* @Purpose: Call Fortran 90 'MYM_LEVEL2' subroutine. */ void Call_Mym_Level2() { MODULE_BL_MYNN_mp_MYM_LEVEL2(&this->m_KTS, &this->m_KTE, &this->m_dz[0], &this->m_u[0], &this->m_v[0], &this->m_thl[0], &this->m_qw[0], &this->m_ql[0], &this->m_vt[0], &this->m_vq[0], &this->m_dtl[0], &this->m_dqw[0], &this->m_dtv[0], &this->m_gm[0], &this->m_gh[0], &this->m_sm[0], &this->m_sh[0] ); } /* @Purpose: Member variables. */ I32 m_KTS; I32 m_KTE; // Input arrays. _Field_size_(m_KTE) R32* __restrict m_dz; _Field_size_(m_KTE) R32* __restrict m_u; _Field_size_(m_KTE) R32* __restrict m_v; _Field_size_(m_KTE) R32* __restrict m_thl; _Field_size_(m_KTE) R32* __restrict m_qw; _Field_size_(m_KTE) R32* __restrict m_ql; _Field_size_(m_KTE) R32* __restrict m_vt; _Field_size_(m_KTE) R32* __restrict m_vq; // Output arrays. 
_Field_size_(m_KTE) R32* __restrict m_dtl; // Vertical gradient of Theta_l (K/m) _Field_size_(m_KTE) R32* __restrict m_dqw; // Vertical gradient of Q_w _Field_size_(m_KTE) R32* __restrict m_dtv; // Vertical gradient of Theta_V (K/m) _Field_size_(m_KTE) R32* __restrict m_gm; // G_M divided by L^2/q^2 (s^(-2)) _Field_size_(m_KTE) R32* __restrict m_gh; // G_H divided by L^2/q^2 (s^(-2)) _Field_size_(m_KTE) R32* __restrict m_sm; // Stability function for momentum, at Level 2 _Field_size_(m_KTE) R32* __restrict m_sh; // Stability function for heat, at Level 2 const static int m_totArrays = 15; }; } } #endif /*__MODULE_BL_MYNN_MYM_LEVEL2_IMPL_H__*/
ode_solver.c
// // Created by sachetto on 02/10/17. // #include "ode_solver.h" #include <string.h> #ifdef _MSC_VER #include "../dlfcn-win32/dlfcn.h" #else #include <dlfcn.h> #endif #include <assert.h> #include "../utils/logfile_utils.h" #ifdef COMPILE_CUDA #include "../gpu_utils/gpu_utils.h" #endif struct ode_solver* new_ode_solver() { struct ode_solver* result = (struct ode_solver *) malloc(sizeof(struct ode_solver)); result->sv = NULL; result->cells_to_solve = NULL; result->handle = NULL; result->get_cell_model_data = NULL; result->set_ode_initial_conditions_cpu = NULL; result->solve_model_ode_cpu = NULL; result->set_ode_initial_conditions_gpu = NULL; result->solve_model_ode_gpu = NULL; //result->update_gpu_fn = NULL; result->model_data.initial_v = INFINITY; result->model_data.number_of_ode_equations = -1; result->edo_extra_data = NULL; result->edo_extra_data = 0; //init_ode_solver_with_cell_model(result); return result; } void free_ode_solver(struct ode_solver *solver) { if(solver->sv) { if(solver->gpu) { #ifdef COMPILE_CUDA cudaFree(solver->sv); #endif } else { free(solver->sv); } } if(solver->edo_extra_data) { free(solver->edo_extra_data); } if(solver->cells_to_solve) { free(solver->cells_to_solve); } if(solver->model_data.model_library_path) { free(solver->model_data.model_library_path); } if(solver->handle) { dlclose(solver->handle); } free(solver); } void init_ode_solver_with_cell_model(struct ode_solver* solver) { char *error; if(!solver->model_data.model_library_path) { fprintf(stderr, "model_library_path not provided. 
Exiting!\n"); exit(1); } print_to_stdout_and_file("Opening %s as model lib\n", solver->model_data.model_library_path); solver->handle = dlopen (solver->model_data.model_library_path, RTLD_LAZY); if (!solver->handle) { fprintf(stderr, "%s\n", dlerror()); exit(1); } solver->get_cell_model_data = dlsym(solver->handle, "init_cell_model_data"); if ((error = dlerror()) != NULL) { fprintf(stderr, "%s\n", error); fprintf(stderr, "init_cell_model_data function not found in the provided model library\n"); if(!isfinite(solver->model_data.initial_v)) { fprintf(stderr, "intial_v not provided in the [cell_model] of the config file! Exiting\n"); exit(1); } } solver->set_ode_initial_conditions_cpu = dlsym(solver->handle, "set_model_initial_conditions_cpu"); if ((error = dlerror()) != NULL) { fprintf(stderr, "%s\n", error); fprintf(stderr, "set_model_initial_conditions function not found in the provided model library\n"); exit(1); } solver->solve_model_ode_cpu = dlsym(solver->handle, "solve_model_odes_cpu"); if ((error = dlerror()) != NULL) { fprintf(stderr, "%s\n", error); fprintf(stderr, "solve_model_odes_cpu function not found in the provided model library\n"); exit(1); } #ifdef COMPILE_CUDA solver->set_ode_initial_conditions_gpu = dlsym(solver->handle, "set_model_initial_conditions_gpu"); if ((error = dlerror()) != NULL) { fputs(error, stderr); fprintf(stderr, "set_model_initial_conditions_gpu function not found in the provided model library\n"); exit(1); } solver->solve_model_ode_gpu = dlsym(solver->handle, "solve_model_odes_gpu"); if ((error = dlerror()) != NULL) { fputs(error, stderr); fprintf(stderr, "\nsolve_model_odes_gpu function not found in the provided model library\n"); exit(1); } /*solver->update_gpu_fn = dlsym(solver->handle, "update_gpu_after_refinement"); if ((error = dlerror()) != NULL) { fputs(error, stderr); fprintf(stderr, "update_gpu_after_refinement function not found in the provided model library\n"); exit(1); }*/ #endif } void 
set_ode_initial_conditions_for_all_volumes(struct ode_solver *solver, uint32_t num_cells) { bool get_initial_v = !isfinite(solver->model_data.initial_v); bool get_neq = solver->model_data.number_of_ode_equations == -1; (*(solver->get_cell_model_data))(&(solver->model_data), get_initial_v, get_neq); int n_odes = solver->model_data.number_of_ode_equations; if (solver->gpu) { #ifdef COMPILE_CUDA set_ode_initial_conditions_gpu_fn *soicg_fn_pt = solver->set_ode_initial_conditions_gpu; if(!soicg_fn_pt) { fprintf(stderr, "The ode solver was set to use the GPU, \n " "but no function called set_model_initial_conditions_gpu " "was provided in the %s shared library file\n", solver->model_data.model_library_path); exit(11); } if(solver->sv != NULL) { check_cuda_errors(cudaFree(solver->sv)); } solver->pitch = soicg_fn_pt(&(solver->sv), num_cells); #endif } else { set_ode_initial_conditions_cpu_fn *soicc_fn_pt = solver->set_ode_initial_conditions_cpu; if(!soicc_fn_pt) { fprintf(stderr, "The ode solver was set to use the CPU, \n " "but no function called set_model_initial_conditions_cpu " "was provided in the %s shared library file\n", solver->model_data.model_library_path); exit(11); } if(solver->sv != NULL) { free(solver->sv); } solver->sv = (real*)malloc(n_odes*num_cells*sizeof(real)); int i; #pragma omp parallel for for(i = 0; i < num_cells; i++) { soicc_fn_pt(solver->sv + (i*n_odes)); } } } void solve_all_volumes_odes(struct ode_solver *the_ode_solver, uint32_t n_active, double cur_time, int num_steps, struct stim_config_hash *stim_configs) { assert(the_ode_solver->sv); real dt = the_ode_solver->min_dt; //int n_odes = the_ode_solver->model_data.number_of_ode_equations; real *sv = the_ode_solver->sv; void *extra_data = the_ode_solver->edo_extra_data; size_t extra_data_size = the_ode_solver->extra_data_size; double time = cur_time; real *merged_stims = (real*)calloc(sizeof(real), n_active); struct stim_config *tmp = NULL; real stim_period; real stim_start, stim_duration; real 
start_period, end_period, period_step; int n_cycles; real new_time; int i; if(stim_configs) { for (int k = 0; k < stim_configs->size; k++) { for (struct stim_config_elt *e = stim_configs->table[k % stim_configs->size]; e != 0; e = e->next) { tmp = e->value; stim_start = tmp->stim_start; stim_duration = tmp->stim_duration; start_period = tmp->start_period; end_period = tmp->end_period; period_step = tmp->period_step; n_cycles = tmp->n_cycles; for (int j = 0; j < num_steps; ++j) { new_time = 0.0f; // New Jhonny stimulus protocol for alternans simulations ... for (double new_period = start_period; new_period >= end_period; new_period -= period_step) { if ( time >= new_time && (time < new_time + n_cycles*new_period || new_period == end_period) ) { stim_period = new_period; time -= new_time; break; } new_time += n_cycles*new_period; } if( (time-floor(time/stim_period)*stim_period>=stim_start) && ( time - floor(time/stim_period)*stim_period <= stim_start + stim_duration ) ) { #pragma omp parallel for for (i = 0; i < n_active; i++) { merged_stims[i] = tmp->spatial_stim_currents[i]; } } // Old Sachetto's stimulus protocol ... 
/* if ((time >= stim_start) && (time <= stim_start + stim_duration)) { #pragma omp parallel for for (i = 0; i < n_active; i++) { merged_stims[i] = tmp->spatial_stim_currents[i]; } } */ time += dt; } time = cur_time; } } } if(the_ode_solver->gpu) { #ifdef COMPILE_CUDA solve_model_ode_gpu_fn *solve_odes_pt = the_ode_solver->solve_model_ode_gpu; solve_odes_pt(dt, sv, merged_stims, the_ode_solver->cells_to_solve, n_active, num_steps, extra_data, extra_data_size); #endif } else { solve_model_ode_cpu_fn *solve_odes_pt = the_ode_solver->solve_model_ode_cpu; solve_odes_pt(dt, sv, merged_stims, the_ode_solver->cells_to_solve, n_active, num_steps, extra_data); } free(merged_stims); } void update_state_vectors_after_refinement(struct ode_solver *ode_solver, const uint32_t *refined_this_step) { assert(ode_solver); assert(ode_solver->sv); size_t num_refined_cells = sb_count(refined_this_step)/8; real *sv = ode_solver->sv; int neq = ode_solver->model_data.number_of_ode_equations; real *sv_src; real *sv_dst; size_t i; if(ode_solver->gpu) { #ifdef COMPILE_CUDA size_t pitch_h = ode_solver->pitch; #pragma omp parallel for private(sv_src, sv_dst) for (i = 0; i < num_refined_cells; i++) { size_t index_id = i * 8; uint32_t index = refined_this_step[index_id]; sv_src = &sv[index]; for (int j = 1; j < 8; j++) { index = refined_this_step[index_id + j]; sv_dst = &sv[index]; check_cuda_errors(cudaMemcpy2D(sv_dst, pitch_h, sv_src, pitch_h, sizeof(real), (size_t )neq, cudaMemcpyDeviceToDevice)); } } //TODO: test if is faster to update the GPU using a kernel or a host function with cudaMemcpy2D //ode_solver->update_gpu_fn(sv, refined_this_step->base, num_refined_cells, neq); #endif } else { #pragma omp parallel for private(sv_src, sv_dst) for (i = 0; i < num_refined_cells; i++) { size_t index_id = i * 8; uint32_t index = refined_this_step[index_id]; sv_src = &sv[index * neq]; for (int j = 1; j < 8; j++) { index = refined_this_step[index_id + j]; sv_dst = &sv[index * neq]; memcpy(sv_dst, 
sv_src, neq * sizeof(real)); } } } } void configure_ode_solver_from_options(struct ode_solver *solver, struct user_options *options) { solver->gpu_id = options->gpu_id; solver->min_dt = (real)options->dt_edo; solver->gpu = options->gpu; if(options->model_file_path) { solver->model_data.model_library_path = strdup(options->model_file_path); } }
atomic.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Demonstrates the OpenMP "atomic" directive: many iterations map to the
 * same x[] slot (index[] holds repeated random targets) and all iterations
 * update the shared accumulator "sum", so both updates are protected with
 * #pragma omp atomic. y[i] is written only by the iteration that owns i,
 * so it needs no protection.
 *
 * Fixes over the original: implicit-int main() (invalid since C99),
 * index[] was allocated with n*sizeof(float) instead of n*sizeof(int),
 * allocations were unchecked and never freed, and no value was returned.
 */
int main(void) {
    int n = 1000;

    float *x = (float *)malloc(n * sizeof(float));
    float *y = (float *)malloc(n * sizeof(float));
    float *work1 = (float *)malloc(n * sizeof(float));
    float *work2 = (float *)malloc(n * sizeof(float));
    int *index = (int *)malloc(n * sizeof(int)); /* was n*sizeof(float) */

    if (!x || !y || !work1 || !work2 || !index) {
        fprintf(stderr, "out of memory\n");
        return EXIT_FAILURE;
    }

    srand((unsigned)n);

    for (int i = 0; i < n; i++) {
        /* only n/10 distinct targets -> guaranteed contention on x[] */
        index[i] = rand() % (n / 10);
        x[i] = 0.0f;
        y[i] = 0.0f;
        work1[i] = (float)i;
        work2[i] = (float)(i * i);
    }

    float sum = 0;

#pragma omp parallel for shared(x, y, index, n, sum)
    for (int i = 0; i < n; i++) {
#pragma omp atomic
        x[index[i]] += work1[i]; /* several threads may hit the same slot */
#pragma omp atomic
        sum += work1[i];         /* shared scalar accumulator */
        y[i] += work2[i];        /* private to iteration i: no atomic needed */
    }

    for (int i = 0; i < n; i++)
        printf("%d %g %g\n", i, x[i], y[i]);
    printf("sum %g\n", sum);

    free(x);
    free(y);
    free(work1);
    free(work2);
    free(index);

    return 0;
}
ast-dump-openmp-distribute-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp distribute parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp distribute parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp distribute parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:4:9, col:41> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 
'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:4:9) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:10:9, col:41> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // 
CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:10:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:17:9, col:53> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:17:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPDistributeParallelForSimdDirective {{.*}} <line:24:9, col:53> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:24:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPDistributeParallelForSimdDirective {{.*}} <line:31:9, col:53> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:51> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr 
{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 
'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for-simd.c:31:9) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
cg_precond_fsai.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include "nb/math_bot.h"
#include "nb/memory_bot.h"
#include "nb/container_bot.h"
#include "nb/solver_bot.h"
#include "../sparse_struct.h"

/* Square of a value; argument is evaluated twice, so pass side-effect-free
 * expressions only. */
#define POW2(a) ((a)*(a))

/**
 * Solve A x = b with the Conjugate Gradient method, preconditioned by a
 * Factorized Sparse Approximate Inverse (FSAI): M^-1 = G' G, where G is a
 * sparse lower-triangular approximation of the inverse Cholesky factor of A.
 *
 * A                  : symmetric positive-definite sparse matrix (required by
 *                      both CG and the Cholesky factorizations below).
 * b                  : right-hand-side vector of length A->N.
 * _x                 : in/out. NOTE(review): _x is read to form the initial
 *                      residual r = b - A*_x, so it must hold the caller's
 *                      initial guess on entry — confirm callers initialize it.
 * threshold          : drop tolerance on |D^-1/2 A D^-1/2| used to pick the
 *                      sparsity pattern of G.
 * max_iter           : iteration cap for the CG loop.
 * tolerance          : convergence test is on ||r||, compared via
 *                      dot_rr > tolerance^2 to avoid a sqrt per iteration.
 * niter_performed    : out, may be NULL; receives the iteration count.
 * tolerance_reached  : out, may be NULL; receives the final ||r||.
 * omp_parallel_threads: thread count for every OpenMP region in here.
 *
 * Returns 0 on convergence, 1 if max_iter was hit with ||r|| still above
 * tolerance.
 */
int nb_sparse_solve_CG_precond_fsai
		(const nb_sparse_t *const A,
		 const double *const b,
		 double *_x,                /* Out */
		 double threshold,
		 uint32_t max_iter, double tolerance,
		 uint32_t* niter_performed,     /* Out (NULL if not required) */
		 double* tolerance_reached,     /* Out (NULL if not required) */
		 uint32_t omp_parallel_threads)
/* Return the num of iterations */
{
	/* Conjugate gradient preconditioned with "Factorized sparse
	 * approximated inverse" */
	double *D = nb_allocate_zero_mem(A->N * sizeof(double));
	double *siD = nb_allocate_zero_mem(A->N * sizeof(double));
	nb_sparse_t* G = nb_sparse_allocate(A->N);
	nb_sparse_t* Gt = nb_sparse_allocate(A->N);

	/* Generate D diagonal matrix as
	 *
	 *     Dii = |Aii|, if |Aii| > 0
	 *           1      otherwise
	 */
#pragma omp parallel for num_threads(omp_parallel_threads)
	for(uint32_t i=0; i < A->N; i++){
		D[i] = nb_sparse_get(A,i,i);
		/* Guard against a zero diagonal so sqrt/division below is safe. */
		if(D[i] == 0)
			D[i] = 1;
		/* Compute D^(-1/2) */
		siD[i] = 1/sqrt(D[i]);
	}

	/* Generate structure of G lower triangular matrix
	 *
	 *    G = 1  , if (i == j) or (|[D^(-1/2) A D^(-1/2)]_ij| > threshold)
	 *        0  , otherwise
	 *
	 * isize counts kept entries with j <= i (rows of G);
	 * isizet counts kept entries with j >= i (rows of G'). */
	for(uint32_t i=0; i < A->N; i++){
		uint32_t isize = 0;
		uint32_t isizet = 0;
#pragma omp parallel for reduction(+:isize,isizet) num_threads(omp_parallel_threads)
		for(uint32_t q=0; q< A->rows_size[i]; q++){
			uint32_t j = A->rows_index[i][q];
			if(i == j || fabs(siD[i] * A->rows_values[i][q] * siD[j]) > threshold){
				if(i > j)
					isize++;
				else if(i < j)
					isizet++;
				else{
					/* diagonal belongs to both G and G' */
					isize++;
					isizet++;
				}
			}
		}
		G->rows_size[i] = isize;
		G->rows_index[i] = nb_allocate_zero_mem(isize * sizeof(uint32_t));
		G->rows_values[i] = nb_allocate_zero_mem(isize * sizeof(double));
		Gt->rows_size[i] = isizet;
		Gt->rows_index[i] = nb_allocate_zero_mem(isizet * sizeof(uint32_t));
		Gt->rows_values[i] = nb_allocate_zero_mem(isizet * sizeof(double));
	}

	/* Each row i of ~G is computed independently by solving a small dense
	 * SPD system A(J,J) g = e_i, where J is the chosen sparsity pattern
	 * of the row — hence the row-parallel loop. */
#pragma omp parallel for num_threads(omp_parallel_threads)
	for(uint32_t i=0; i < A->N; i++){
		/* Compute values of ~G */
		double* subA = nb_allocate_zero_mem(POW2(G->rows_size[i]) * sizeof(double));
		/* The data of vector g is not allocated, is a pointer to
		 * each row of ~G */
		double* subg = G->rows_values[i];
		double *delta = nb_allocate_zero_mem(G->rows_size[i] * sizeof(double));
		uint32_t k = 0;
		for(uint32_t q = 0; q < A->rows_size[i]; q++){
			uint32_t j = A->rows_index[i][q];
			if(i == j || fabs(siD[i] * A->rows_values[i][q] * siD[j]) > threshold){
				if(i >= j){
					G->rows_index[i][k] = j;
					/* Gather the dense submatrix A(J,J), filling both
					 * the new row k and the new column k. */
					for(uint32_t l=0; l<k; l++){
						subA[k*G->rows_size[i] + l] =
							nb_sparse_get(A,j,G->rows_index[i][l]);
						subA[l*G->rows_size[i] + k] =
							nb_sparse_get(A,G->rows_index[i][l],j);
					}
					subA[k*G->rows_size[i] + k] = nb_sparse_get(A,j,j);
					/* Right-hand side is the canonical basis vector e_i. */
					if(i == j)
						delta[k] = 1;
					else
						delta[k] = 0;
					k++;
				}
			}
		}
		double* L = nb_allocate_zero_mem(POW2(G->rows_size[i]) * sizeof(double));
		nb_matrix_cholesky_decomposition(subA, L, G->rows_size[i]);
		nb_matrix_cholesky_solve(L, delta, subg, G->rows_size[i]);
		/* Finally do G = [~G]*D */
		for(uint32_t q=0; q < G->rows_size[i]; q++)
			G->rows_values[i][q] *= D[G->rows_index[i][q]];
		/* Free memory */
		nb_free_mem(subA);
		nb_free_mem(L);
		nb_free_mem(delta);
	}
	/* Store G transposed */
	nb_sparse_get_transpose(G,Gt);
	/* Free memory (D and siD are only needed to build the preconditioner) */
	nb_free_mem(D);
	nb_free_mem(siD);

	/* Solve Ax = b with Conjugate Gradient method */
	double* r = nb_allocate_zero_mem(A->N * sizeof(double));
	double* p = nb_allocate_zero_mem(A->N * sizeof(double));
	double* w = nb_allocate_zero_mem(A->N * sizeof(double));
	double* Gr = nb_allocate_zero_mem(A->N * sizeof(double));
	double* Mr = nb_allocate_zero_mem(A->N * sizeof(double));
	double dot_rr = 0;
	/* Initial residual r = b - A*_x and its squared norm. */
#pragma omp parallel for reduction(+:dot_rr) schedule(guided) num_threads(omp_parallel_threads)
	for(uint32_t i=0; i< A->N; i++){
		double Ax_i = 0;
		for(uint32_t j=0; j< A->rows_size[i]; j++)
			Ax_i += A->rows_values[i][j] * _x[A->rows_index[i][j]];
		r[i] = b[i] - Ax_i;
		dot_rr += r[i]*r[i];
	}
	/* Compute Gr */
#pragma omp parallel for num_threads(omp_parallel_threads)
	for(uint32_t i=0; i< A->N; i++){
		Gr[i] = 0;
		for(uint32_t j=0; j< G->rows_size[i]; j++)
			Gr[i] += G->rows_values[i][j] * r[G->rows_index[i][j]];
	}
	/* Compute Mr <- G'(Gr), the preconditioned residual; it also seeds
	 * the first search direction p. */
#pragma omp parallel for num_threads(omp_parallel_threads)
	for(uint32_t i=0; i< A->N; i++){
		Mr[i] = 0;
		for(uint32_t j=0; j< Gt->rows_size[i]; j++)
			Mr[i] += Gt->rows_values[i][j] * Gr[Gt->rows_index[i][j]];
		p[i] = Mr[i];
	}
	uint32_t k = 0;
	/* Start iterations (standard preconditioned CG recurrence). */
	while(dot_rr > tolerance*tolerance && k < max_iter){
		double dot_pw = 0;
		double dot_rMr = 0;
		/* w = A p; accumulate p'w and r'Mr for the step length. */
#pragma omp parallel for reduction(+:dot_pw, dot_rMr) num_threads(omp_parallel_threads) schedule(guided)
		for(uint32_t i=0; i< A->N; i++){
			w[i] = 0;
			for(uint32_t j=0; j< A->rows_size[i]; j++)
				w[i] += A->rows_values[i][j] * p[A->rows_index[i][j]];
			dot_pw += p[i]*w[i];
			dot_rMr += r[i]*Mr[i];
		}
		/* NOTE(review): dot_pw == 0 (possible if A is not SPD) would
		 * produce inf/NaN here — relies on the SPD precondition. */
		double alphak = dot_rMr/dot_pw;
		dot_rr = 0;
#pragma omp parallel for reduction(+:dot_rr) num_threads(omp_parallel_threads) schedule(guided)
		for(uint32_t i=0; i< A->N; i++){
			_x[i] += alphak*p[i];
			r[i] -= alphak*w[i];
			dot_rr += r[i]*r[i];
		}
		/* Compute Gr */
#pragma omp parallel for num_threads(omp_parallel_threads)
		for(uint32_t i=0; i< A->N; i++){
			Gr[i] = 0;
			for(uint32_t j=0; j< G->rows_size[i]; j++)
				Gr[i] += G->rows_values[i][j] * r[G->rows_index[i][j]];
		}
		/* Compute Mr <- G'(Gr) */
		double dot_rkMrk = 0;
#pragma omp parallel for reduction(+:dot_rkMrk) num_threads(omp_parallel_threads)
		for(uint32_t i=0; i< A->N; i++){
			Mr[i] = 0;
			for(uint32_t j=0; j< Gt->rows_size[i]; j++)
				Mr[i] += Gt->rows_values[i][j] * Gr[Gt->rows_index[i][j]];
			dot_rkMrk += r[i]*Mr[i];
		}
		/* Fletcher-Reeves style update of the search direction. */
		double betak = dot_rkMrk/dot_rMr;
#pragma omp parallel for num_threads(omp_parallel_threads)
		for(uint32_t i=0; i< A->N; i++)
			p[i] = Mr[i] + betak*p[i];
		k++;
	}
	/* Free memory */
	nb_sparse_destroy(G);
	nb_sparse_destroy(Gt);
	nb_free_mem(r);
	nb_free_mem(p);
	nb_free_mem(w);
	nb_free_mem(Gr);
	nb_free_mem(Mr);

	if(niter_performed != NULL)
		niter_performed[0]= k;

	if(tolerance_reached != NULL)
		*tolerance_reached = sqrt(dot_rr);

	if(dot_rr > tolerance*tolerance)
		return 1;   /* did not converge within max_iter */
	return 0;
}
mandel-omp-task-row.c
/*
 * Sequential Mandelbrot program
 *
 * This program computes and displays all or part of the Mandelbrot
 * set. By default, it examines all points in the complex plane
 * that have both real and imaginary parts between -2 and 2.
 * Command-line parameters allow zooming in on a specific part of
 * this range.
 *
 * Usage:
 *   mandel [-i maxiter -c x0 y0 -s size -w windowsize]
 * where
 *   maxiter denotes the maximum number of iterations at each point -- by default 1000
 *   x0, y0, and size specify the range to examine (a square
 *     centered at (x0 + iy0) of size 2*size by 2*size -- by default,
 *     a square of size 4 by 4 centered at the origin)
 *   windowsize denotes the size of the image (diplay window) to compute
 *
 * Input: none, except the optional command-line arguments
 * Output: a graphical display as described in Wilkinson & Allen,
 *   displayed using the X Window system, plus text output to
 *   standard output showing the above parameters, plus execution
 *   time in seconds.
 *
 * Code based on the original code from Web site for Wilkinson and Allen's
 * text on parallel programming:
 * http://www.cs.uncc.edu/~abw/parallel/par_prog/
 *
 * NOTE: despite the "Sequential" header inherited from the original code,
 * this variant parallelizes the computation with one OpenMP task per row.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>

/* Wall-clock time in microseconds, from gettimeofday(2). */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

/* Timing helpers: both expect a `double stamp;` in the enclosing scope. */
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6fs\n",(_m), stamp);

/* Default values for things. */
#define N           2           /* size of problem space (x, y from -N to N) */
#define NPIXELS     800         /* size of display window in pixels */

/* NOTE(review): row and col are file-scope globals traversed inside the
 * OpenMP region below; correctness relies on the `single` construct (only
 * one thread runs the row loop) plus firstprivate(row)/private(col) on the
 * task. Locals would be safer — confirm before refactoring. */
int row, col; // variables used to traverse the problem space

/* Structure definition for complex numbers */
typedef struct {
        double real, imag;
} complex;

#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h"     /* has setup(), interact() */
#endif

/*
 * Compute the Mandelbrot escape count for every pixel of a height x width
 * image covering the rectangle starting at (real_min, imag_min) with per-pixel
 * steps (scale_real, scale_imag). One OpenMP task is created per image row.
 *
 * maxiter bounds the escape iteration. Depending on _DISPLAY_ the result is
 * either drawn point-by-point into an X11 window (serialized by a critical
 * section, since Xlib calls are not thread-safe) or stored into output[row][col].
 */
void mandelbrot(int height, int width, double real_min, double imag_min,
                double scale_real, double scale_imag, int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display, Window win, GC gc,
                double scale_color, double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display */
    #pragma omp parallel
    #pragma omp single
    {
    for (row = 0; row < height; ++row) {
        /* Each task owns one row; row is captured at task creation. */
        #pragma omp task firstprivate(row) private(col)
        for (col = 0; col < width; ++col) {
            complex z, c;

            z.real = z.imag = 0;

            /* Scale display coordinates to actual region */
            c.real = real_min + ((double) col * scale_real);
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);
                                        /* height-1-row so y axis displays
                                         * with larger values at top */

            /* Calculate z0, z1, .... until divergence or maximum iterations */
            int k = 0;
            double lengthsq, temp;
            do  {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real + z.imag*z.imag;
                ++k;
            } while (lengthsq < (N*N) && k < maxiter);

#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                /* Xlib is not thread-safe: serialize all drawing calls. */
                #pragma omp critical
                {
                XSetForeground (display, gc, color);
                XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col]=k;
#endif
        }
    }
    }
}

/*
 * Parse command-line options, allocate the output image (non-display build),
 * run and time the computation, and either flush the X display or dump the
 * image to "mandel.out" when -o was given.
 */
int main(int argc, char *argv[]) {
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;         /* dimensions of display window */
    int height = NPIXELS;
    double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-i")==0) {
                maxiter = atoi(argv[++i]);
        } else if (strcmp(argv[i], "-w")==0) {
                width = atoi(argv[++i]);
                height = width;   /* image is always square */
        } else if (strcmp(argv[i], "-s")==0) {
                size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o")==0) {
                if((fp=fopen("mandel.out", "wb"))==NULL) {
                        fprintf(stderr, "Unable to open file\n");
                        return EXIT_FAILURE;
                }
        }
#endif
        else if (strcmp(argv[i], "-c")==0) {
                x0 = atof(argv[++i]);
                y0 = atof(argv[++i]);
        } else {
                /* Unknown option: print usage and exit */
#if _DISPLAY_
                fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
                fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
                fprintf(stderr, "       -o to write computed image to disk (default no file generated)\n");
#endif
                fprintf(stderr, "       -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
                fprintf(stderr, "       -w to specify the size of the display window (default 800x800 pixels)\n");
#else
                fprintf(stderr, "       -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
                fprintf(stderr, "       -c to specify the center x0+iy0 of the square to compute (default origin)\n");
                fprintf(stderr, "       -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
                return EXIT_FAILURE;
        }
    }

    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return =
        setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    /* NOTE(review): malloc results are not checked and the rows are never
     * freed; acceptable for a benchmark that exits right after. */
    output = malloc(height*sizeof(int *));
    for (int row = 0; row < height; ++row)
        output[row] = malloc(width*sizeof(int));
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    /* -o given: dump the raw int image row by row. */
    if (fp != NULL) {
        for (int row = 0; row < height; ++row)
            if(fwrite(output[row], sizeof(int), width, fp) != width) {
                    fprintf(stderr, "Output file not written correctly\n");
            }
    }
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height,
                 real_min, real_max, imag_min, imag_max);
    }
    return EXIT_SUCCESS;
#endif
}
hello_mp.c
// Copyright (c) 2021 B.Roden
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

// OpenMP header
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

// Classic OpenMP "hello world": every thread in the team prints its id,
// and thread 0 additionally reports the team size.
int main(int argc, char* argv[])
{
    // Fork a team of threads. Declaring the id inside the parallel region
    // makes it thread-private without needing a private() clause.
    #pragma omp parallel
    {
        const int thread_id = omp_get_thread_num();
        printf("thread no = %d\n", thread_id);

        // Only thread 0 (the master thread) reports how many threads ran.
        if (thread_id == 0) {
            printf("no of threads = %d\n", omp_get_num_threads());
        }
    }

    return 0;
}
concurrent_unordered_map.cuh.h
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CONCURRENT_UNORDERED_MAP_CUH
#define CONCURRENT_UNORDERED_MAP_CUH

#include <thrust/pair.h>
#include <cassert>
#include <iostream>
#include <iterator>
#include <type_traits>
#include "hash_functions.cuh"
#include "managed.cuh"
#include "managed_allocator.cuh"

// TODO: replace this with CUDA_TRY and propagate the error
#ifndef CUDA_RT_CALL
#define CUDA_RT_CALL(call)                                                    \
  {                                                                           \
    cudaError_t cudaStatus = call;                                            \
    if (cudaSuccess != cudaStatus) {                                          \
      fprintf(stderr,                                                         \
              "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with " \
              "%s (%d).\n",                                                   \
              #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus),      \
              cudaStatus);                                                    \
      exit(1);                                                                \
    }                                                                         \
  }
#endif

// 8-bit atomicCAS, emulated with a 32-bit CAS on the aligned word that
// contains `address`.
//
// BUG FIX: the previous implementation issued one 32-bit atomicCAS whose
// expected word had the three neighboring bytes forced to zero, so
//   (a) the CAS spuriously failed whenever any neighboring byte was non-zero,
//   (b) on success it zeroed the neighboring bytes, and
//   (c) it returned the low byte of the old word rather than the byte at
//       `address`.
// The loop below preserves the neighboring bytes and returns the previous
// value of the addressed byte, matching the semantics of the built-in
// atomicCAS overloads (returns the old value; the store happens only while
// the addressed byte equals `compare`).
__inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare,
                                       int8_t val) {
  int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 3));
  unsigned int shift = ((size_t)address & 3) * 8;  // bit offset of our byte
  int32_t mask = (int32_t)0xFF << shift;
  int32_t old_word = *base_address;
  int32_t assumed;
  do {
    assumed = old_word;
    // If the addressed byte no longer matches `compare`, a plain CAS would
    // fail anyway: return its current value without touching memory.
    if ((int8_t)((assumed & mask) >> shift) != compare) break;
    const int32_t new_word =
        (assumed & ~mask) | ((((int32_t)(uint8_t)val) << shift) & mask);
    old_word = atomicCAS(base_address, assumed, new_word);
  } while (assumed != old_word);  // retry if a neighbor byte changed
  return (int8_t)((old_word & mask) >> shift);
}

// TODO: can we do this more efficiently?
// 16-bit atomicCAS, emulated with a 32-bit CAS on the aligned word that
// contains `address`. Assumes `address` is 2-byte aligned (as the original
// did via the `& 2` offset computation).
//
// BUG FIX: as with the 8-bit overload above in the original code, the single
// 32-bit CAS zeroed/compared the neighboring half-word and returned the low
// half-word instead of the addressed one. The loop preserves the neighbor
// and returns the previous value of the addressed half-word.
__inline__ __device__ int16_t atomicCAS(int16_t* address, int16_t compare,
                                        int16_t val) {
  int32_t* base_address = (int32_t*)((char*)address - ((size_t)address & 2));
  unsigned int shift = ((size_t)address & 2) * 8;  // 0 or 16
  int32_t mask = (int32_t)0xFFFF << shift;
  int32_t old_word = *base_address;
  int32_t assumed;
  do {
    assumed = old_word;
    if ((int16_t)((assumed & mask) >> shift) != compare) break;
    const int32_t new_word =
        (assumed & ~mask) | ((((int32_t)(uint16_t)val) << shift) & mask);
    old_word = atomicCAS(base_address, assumed, new_word);
  } while (assumed != old_word);
  return (int16_t)((old_word & mask) >> shift);
}

// 64-bit signed CAS: forwards to the built-in unsigned long long overload.
__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare,
                                        int64_t val) {
  return (int64_t)atomicCAS((unsigned long long*)address,
                            (unsigned long long)compare,
                            (unsigned long long)val);
}

__inline__ __device__ uint64_t atomicCAS(uint64_t* address, uint64_t compare,
                                         uint64_t val) {
  return (uint64_t)atomicCAS((unsigned long long*)address,
                             (unsigned long long)compare,
                             (unsigned long long)val);
}

__inline__ __device__ long long int atomicCAS(long long int* address,
                                              long long int compare,
                                              long long int val) {
  return (long long int)atomicCAS((unsigned long long*)address,
                                  (unsigned long long)compare,
                                  (unsigned long long)val);
}

// double CAS via bitwise reinterpretation as 64-bit integers. Note the
// comparison is bit-exact, not numeric (-0.0 != +0.0, NaNs compare by bits).
__inline__ __device__ double atomicCAS(double* address, double compare,
                                       double val) {
  return __longlong_as_double(atomicCAS((unsigned long long int*)address,
                                        __double_as_longlong(compare),
                                        __double_as_longlong(val)));
}

// float CAS via bitwise reinterpretation as 32-bit integers (bit-exact
// comparison, same caveats as the double overload).
__inline__ __device__ float atomicCAS(float* address, float compare,
                                      float val) {
  return __int_as_float(
      atomicCAS((int*)address, __float_as_int(compare), __float_as_int(val)));
}

// 64-bit atomicAdd overloads forwarding to the unsigned long long built-in;
// two's-complement wrap-around makes this correct for signed values too.
__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val) {
  return (int64_t)atomicAdd((unsigned long long*)address,
                            (unsigned long long)val);
}

__inline__ __device__ uint64_t atomicAdd(uint64_t* address, uint64_t val) {
  return (uint64_t)atomicAdd((unsigned long long*)address,
                             (unsigned long long)val);
}

// Load a key/value pair with the widest vector load that exactly matches
// sizeof(pair_type), falling back to a plain load otherwise.
// NOTE(review): the vector paths require `ptr` to be aligned to
// sizeof(pair_type) — confirm the hash-table allocation guarantees this.
template <typename pair_type>
__forceinline__ __device__ pair_type load_pair_vectorized(
    const pair_type* __restrict__ const ptr) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.vec_val = *reinterpret_cast<const uint4*>(ptr);
    return converter.pair_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.vec_val = *reinterpret_cast<const uint2*>(ptr);
    return converter.pair_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const int*>(ptr);
    return converter.pair_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const short*>(ptr);
    return converter.pair_val;
  } else {
    return *ptr;
  }
}

// Store a key/value pair with the widest vector store that exactly matches
// sizeof(pair_type), falling back to a plain store otherwise (same alignment
// caveat as load_pair_vectorized).
template <typename pair_type>
__forceinline__ __device__ void store_pair_vectorized(
    pair_type* __restrict__ const ptr, const pair_type val) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint4*>(ptr) = converter.vec_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint2*>(ptr) = converter.vec_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<int*>(ptr) = converter.vec_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<short*>(ptr) = converter.vec_val;
  } else {
    *ptr = val;
  }
}

// Kernel: initialize every slot of the hash table with the
// <unused_key, unused_value> sentinel pair. One thread per slot.
template <typename value_type, typename size_type, typename key_type,
          typename elem_type>
__global__ void init_hashtbl(value_type* __restrict__ const hashtbl_values,
                             const size_type n, const key_type key_val,
                             const elem_type elem_val) {
  const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    // Simply store every element a <K, V> pair
    store_pair_vectorized(hashtbl_values + idx,
                          thrust::make_pair(key_val, elem_val));
  }
}

// Device-callable replacement for std::equal_to.
template <typename T>
struct equal_to {
  using result_type = bool;
  using first_argument_type = T;
  using second_argument_type = T;
  __forceinline__ __host__ __device__ constexpr bool operator()(
      const first_argument_type& lhs, const second_argument_type& rhs) const {
    return lhs == rhs;
  }
};

// Iterator adapter that wraps [begin, end) and cycles back to begin when
// incremented past the last element — used for open-addressing probes.
template <typename Iterator>
class cycle_iterator_adapter {
 public:
  using value_type = typename std::iterator_traits<Iterator>::value_type;
  using difference_type =
      typename std::iterator_traits<Iterator>::difference_type;
  using pointer = typename std::iterator_traits<Iterator>::pointer;
  using reference = typename std::iterator_traits<Iterator>::reference;
  using iterator_type = Iterator;

  cycle_iterator_adapter() = delete;

  __host__ __device__ explicit cycle_iterator_adapter(
      const iterator_type& begin, const iterator_type& end,
      const iterator_type& current)
      : m_begin(begin), m_end(end), m_current(current) {}

  // Pre-increment: advance, wrapping from end back to begin.
  __host__ __device__ cycle_iterator_adapter& operator++() {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  __host__ __device__ const cycle_iterator_adapter& operator++() const {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // Post-increment.
  // BUG FIX: the original returned a *reference* to the local `old`
  // (dangling as soon as the function returns — undefined behavior for any
  // caller that used the result). Returning by value is the standard
  // post-increment contract and is call-site compatible.
  __host__ __device__ cycle_iterator_adapter operator++(int) {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  __host__ __device__ cycle_iterator_adapter operator++(int)const {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  __host__ __device__ bool equal(
      const cycle_iterator_adapter<iterator_type>& other) const {
    return m_current == other.m_current && m_begin == other.m_begin &&
           m_end == other.m_end;
  }

  __host__ __device__ reference& operator*() { return *m_current; }

  __host__ __device__ const reference& operator*() const { return *m_current; }

  __host__ __device__ const pointer operator->() const {
    return m_current.operator->();
  }

  __host__ __device__ pointer operator->() { return m_current; }

  __host__ __device__ iterator_type getter() const { return m_current; }

 private:
  iterator_type m_current;
  iterator_type m_begin;
  iterator_type m_end;
};

template <class T>
__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return lhs.equal(rhs);
}

template <class T>
__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return !lhs.equal(rhs);
}

/**
 * Does support concurrent insert, but not concurrent insert and probing.
* * TODO: * - add constructor that takes pointer to hash_table to avoid allocations * - extend interface to accept streams */ template <typename Key, typename Element, Key unused_key, typename Hasher = default_hash<Key>, typename Equality = equal_to<Key>, typename Allocator = managed_allocator<thrust::pair<Key, Element>>, bool count_collisions = false> class concurrent_unordered_map : public managed { public: using size_type = size_t; using hasher = Hasher; using key_equal = Equality; using allocator_type = Allocator; using key_type = Key; using value_type = thrust::pair<Key, Element>; using mapped_type = Element; using iterator = cycle_iterator_adapter<value_type*>; using const_iterator = const cycle_iterator_adapter<value_type*>; private: union pair2longlong { unsigned long long int longlong; value_type pair; }; public: concurrent_unordered_map(const concurrent_unordered_map&) = delete; concurrent_unordered_map& operator=(const concurrent_unordered_map&) = delete; explicit concurrent_unordered_map(size_type n, const mapped_type unused_element, const Hasher& hf = hasher(), const Equality& eql = key_equal(), const allocator_type& a = allocator_type()) : m_hf(hf), m_equal(eql), m_allocator(a), m_hashtbl_size(n), m_hashtbl_capacity(n), m_collisions(0), m_unused_element( unused_element) { // allocate the raw data of hash table: // m_hashtbl_values,pre-alloc it on current GPU if UM. 
m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity); constexpr int block_size = 128; { cudaPointerAttributes hashtbl_values_ptr_attributes; cudaError_t status = cudaPointerGetAttributes( &hashtbl_values_ptr_attributes, m_hashtbl_values); #if CUDART_VERSION >= 10000 if (cudaSuccess == status && hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged) #else if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged) #endif { int dev_id = 0; CUDA_RT_CALL(cudaGetDevice(&dev_id)); CUDA_RT_CALL(cudaMemPrefetchAsync( m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, 0)); } } // Initialize kernel, set all entry to unused <K,V> init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size>>>( m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element); // CUDA_RT_CALL( cudaGetLastError() ); CUDA_RT_CALL(cudaStreamSynchronize(0)); CUDA_RT_CALL(cudaGetLastError()); } ~concurrent_unordered_map() { m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity); } __host__ __device__ iterator begin() { return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, m_hashtbl_values); } __host__ __device__ const_iterator begin() const { return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, m_hashtbl_values); } __host__ __device__ iterator end() { return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, m_hashtbl_values + m_hashtbl_size); } __host__ __device__ const_iterator end() const { return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, m_hashtbl_values + m_hashtbl_size); } __host__ __device__ size_type size() const { return m_hashtbl_size; } __host__ __device__ value_type* data() const { return m_hashtbl_values; } __forceinline__ static constexpr __host__ __device__ key_type get_unused_key() { return unused_key; } // Generic update of a hash table value for any aggregator template <typename aggregation_type> __forceinline__ __device__ void update_existing_value( mapped_type& 
existing_value, value_type const& insert_pair, aggregation_type) { // update without CAS existing_value = insert_pair.second; } __forceinline__ __device__ void accum_existing_value_atomic( mapped_type& existing_value, value_type const& accum_pair) { // update with CAS // existing_value = insert_pair.second; int num_element = sizeof(existing_value.data) / sizeof(*(existing_value.data)); const mapped_type& accumulator = accum_pair.second; for (int i = 0; i < num_element; i++) { atomicAdd(existing_value.data + i, accumulator.data[i]); } // atomicAdd(&existing_value, double val) } // TODO Overload atomicAdd for 1 byte and 2 byte types, until then, overload // specifically for the // types where atomicAdd already has an overload. Otherwise the generic // update_existing_value will // be used. Specialization for COUNT aggregator /* __forceinline__ __host__ __device__ void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op<int32_t> op) { atomicAdd(&existing_value, static_cast<mapped_type>(1)); } // Specialization for COUNT aggregator __forceinline__ __host__ __device__ void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op<int64_t> op) { atomicAdd(&existing_value, static_cast<mapped_type>(1)); } // Specialization for COUNT aggregator __forceinline__ __host__ __device__ void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op<float> op) { atomicAdd(&existing_value, static_cast<mapped_type>(1)); } // Specialization for COUNT aggregator __forceinline__ __host__ __device__ void update_existing_value(mapped_type & existing_value, value_type const & insert_pair, count_op<double> op) { atomicAdd(&existing_value, static_cast<mapped_type>(1)); } */ /* --------------------------------------------------------------------------*/ /** * @Synopsis Inserts a new (key, value) pair. 
If the key already exists in the map an aggregation operation is performed with the new value and existing value. E.g., if the aggregation operation is 'max', then the maximum is computed between the new value and existing value and the result is stored in the map. * * @Param[in] x The new (key, value) pair to insert * @Param[in] op The aggregation operation to perform * @Param[in] keys_equal An optional functor for comparing two keys * @Param[in] precomputed_hash Indicates if a precomputed hash value is being passed in to use * to determine the write location of the new key * @Param[in] precomputed_hash_value The precomputed hash value * @tparam aggregation_type A functor for a binary operation that performs the aggregation * @tparam comparison_type A functor for comparing two keys * * @Returns An iterator to the newly inserted key,value pair */ /* ----------------------------------------------------------------------------*/ template <typename aggregation_type, class comparison_type = key_equal, typename hash_value_type = typename Hasher::result_type> __forceinline__ __device__ iterator insert( const value_type& x, aggregation_type op, comparison_type keys_equal = key_equal(), bool precomputed_hash = false, hash_value_type precomputed_hash_value = 0) { const size_type hashtbl_size = m_hashtbl_size; value_type* hashtbl_values = m_hashtbl_values; hash_value_type hash_value{0}; // If a precomputed hash value has been passed in, then use it to determine // the write location of the new key if (true == precomputed_hash) { hash_value = precomputed_hash_value; } // Otherwise, compute the hash value from the new key else { hash_value = m_hf(x.first); } size_type current_index = hash_value % hashtbl_size; value_type* current_hash_bucket = &(hashtbl_values[current_index]); const key_type insert_key = x.first; bool insert_success = false; size_type counter = 0; while (false == insert_success) { if (counter++ >= hashtbl_size) { return end(); } key_type& existing_key = 
current_hash_bucket->first; mapped_type& existing_value = current_hash_bucket->second; // Try and set the existing_key for the current hash bucket to insert_key const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key); // If old_key == unused_key, the current hash bucket was empty // and existing_key was updated to insert_key by the atomicCAS. // If old_key == insert_key, this key has already been inserted. // In either case, perform the atomic aggregation of existing_value and // insert_value // Because the hash table is initialized with the identity value of the // aggregation // operation, it is safe to perform the operation when the existing_value // still // has its initial value // TODO: Use template specialization to make use of native atomic // functions // TODO: How to handle data types less than 32 bits? if (keys_equal(unused_key, old_key) || keys_equal(insert_key, old_key)) { update_existing_value(existing_value, x, op); insert_success = true; } current_index = (current_index + 1) % hashtbl_size; current_hash_bucket = &(hashtbl_values[current_index]); } return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size, current_hash_bucket); } /* This function is not currently implemented __forceinline__ __host__ __device__ iterator insert(const value_type& x) { const size_type hashtbl_size = m_hashtbl_size; value_type* hashtbl_values = m_hashtbl_values; const size_type key_hash = m_hf( x.first ); size_type hash_tbl_idx = key_hash%hashtbl_size; value_type* it = 0; while (0 == it) { value_type* tmp_it = hashtbl_values + hash_tbl_idx; #ifdef __CUDA_ARCH__ if ( std::numeric_limits<key_type>::is_integer && std::numeric_limits<mapped_type>::is_integer && sizeof(unsigned long long int) == sizeof(value_type) ) { pair2longlong converter = {0ull}; converter.pair = thrust::make_pair( unused_key, m_unused_element ); const unsigned long long int unused = converter.longlong; converter.pair = x; const unsigned long long int value = converter.longlong; 
const unsigned long long int old_val = atomicCAS( reinterpret_cast<unsigned long long int*>(tmp_it), unused, value );
            if ( old_val == unused ) {
                it = tmp_it;
            }
            else if ( count_collisions )
            {
                atomicAdd( &m_collisions, 1 );
            }
        } else {
            const key_type old_key = atomicCAS( &(tmp_it->first), unused_key, x.first );
            if ( m_equal( unused_key, old_key ) ) {
                (m_hashtbl_values+hash_tbl_idx)->second = x.second;
                it = tmp_it;
            }
            else if ( count_collisions )
            {
                atomicAdd( &m_collisions, 1 );
            }
        }
#else

#pragma omp critical
        {
            if ( m_equal( unused_key, tmp_it->first ) ) {
                hashtbl_values[hash_tbl_idx] = thrust::make_pair( x.first, x.second );
                it = tmp_it;
            }
        }
#endif
        hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size;
    }

    return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it);
}
*/

// Looks up key k by linear probing from its home slot hash(k) % size.
// Returns an iterator at the matching bucket, or end() when an unused slot
// is reached first (key absent) or more than m_hashtbl_size slots have been
// probed (table saturated with other keys).
__forceinline__ __host__ __device__ const_iterator find(const key_type& k) const
{
  size_type key_hash = m_hf(k);
  size_type hash_tbl_idx = key_hash % m_hashtbl_size;

  value_type* begin_ptr = 0;

  size_type counter = 0;
  while (0 == begin_ptr) {
    value_type* tmp_ptr = m_hashtbl_values + hash_tbl_idx;
    const key_type tmp_val = tmp_ptr->first;
    if (m_equal(k, tmp_val)) {
      // Found the key.
      begin_ptr = tmp_ptr;
      break;
    }
    // Hitting an unused slot means the key was never inserted; counter guards
    // against an endless scan of a completely full table.
    if (m_equal(unused_key, tmp_val) || counter > m_hashtbl_size) {
      begin_ptr = m_hashtbl_values + m_hashtbl_size;
      break;
    }
    hash_tbl_idx = (hash_tbl_idx + 1) % m_hashtbl_size;
    ++counter;
  }

  return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size, begin_ptr);
}

// get-or-insert with a side counter: on first insertion of k the bucket's
// value is set to the next id drawn from *value_counter via atomicAdd; on a
// repeat visit the caller's thread spins until the inserting thread has
// published the value. Returns end() when every slot is occupied by other keys.
template <typename aggregation_type,
          typename counter_type,
          class comparison_type     = key_equal,
          typename hash_value_type  = typename Hasher::result_type>
__forceinline__ __device__ iterator get_insert(
  const key_type& k,
  aggregation_type op,
  counter_type* value_counter,
  comparison_type keys_equal                 = key_equal(),
  bool precomputed_hash                      = false,
  hash_value_type precomputed_hash_value     = 0)
{
  const size_type hashtbl_size = m_hashtbl_size;
  value_type* hashtbl_values   = m_hashtbl_values;

  hash_value_type hash_value{0};

  // If a precomputed hash value has been passed in, then use it to determine
  // the write location of the new key
  if (true == precomputed_hash) {
    hash_value = precomputed_hash_value;
  }
  // Otherwise, compute the hash value from the new key
  else {
    hash_value = m_hf(k);
  }

  size_type current_index          = hash_value % hashtbl_size;
  value_type* current_hash_bucket  = &(hashtbl_values[current_index]);

  const key_type insert_key = k;

  bool insert_success = false;

  size_type counter = 0;
  while (false == insert_success) {
    // Situation #5: No slot: all slots in the hashtable are occupied by other
    // keys, both get and insert fail. Return empty iterator.
    if (counter++ >= hashtbl_size) {
      return end();
    }

    key_type& existing_key = current_hash_bucket->first;
    // volatile so the spin below re-reads the published value each iteration.
    volatile mapped_type& existing_value = current_hash_bucket->second;

    // Try and set the existing_key for the current hash bucket to insert_key
    const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);

    // If old_key == unused_key, the current hash bucket was empty
    // and existing_key was updated to insert_key by the atomicCAS.
    // If old_key == insert_key, this key has already been inserted.
    // TODO: Use template specialization to make use of native atomic functions
    // TODO: How to handle data types less than 32 bits?

    // Situation #1: Empty slot: this key never existed in the table, ready to
    // insert.
    if (keys_equal(unused_key, old_key)) {
      // update_existing_value(existing_value, x, op);
      existing_value = (mapped_type)(atomicAdd(value_counter, 1));
      break;
    }
    // Situation #2+#3: Target slot: this slot is the slot for this key
    else if (keys_equal(insert_key, old_key)) {
      while (existing_value == m_unused_element) {
        // Situation #2: this slot is being filled by another CUDA thread and
        // the value is not yet ready, just wait.
        // NOTE(review): this busy-wait relies solely on the volatile read of
        // existing_value; there is no memory fence here — confirm the
        // ordering is sufficient on the target architecture.
      }
      // Situation #3: this slot is already ready, get succeeds and we return
      // (an iterator to) the value.
      break;
    }
    // Situation #4: Wrong slot: this slot is occupied by another key, get
    // fails; do nothing and linear-probe to the next slot.
    current_index       = (current_index + 1) % hashtbl_size;
    current_hash_bucket = &(hashtbl_values[current_index]);
  }

  return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size, current_hash_bucket);
}

// Asynchronously copies another map's table into this one on `stream`,
// reallocating when the current capacity is too small. The copy is not
// synchronized here; the caller owns stream completion.
int assign_async(const concurrent_unordered_map& other, cudaStream_t stream = 0)
{
  m_collisions = other.m_collisions;
  if (other.m_hashtbl_size <= m_hashtbl_capacity) {
    m_hashtbl_size = other.m_hashtbl_size;
  } else {
    m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
    m_hashtbl_capacity = other.m_hashtbl_size;
    m_hashtbl_size     = other.m_hashtbl_size;

    m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
  }
  CUDA_RT_CALL(cudaMemcpyAsync(m_hashtbl_values,
                               other.m_hashtbl_values,
                               m_hashtbl_size * sizeof(value_type),
                               cudaMemcpyDefault,
                               stream));
  return 0;
}

// Launches init_hashtbl on `stream` to reset every bucket to
// (unused_key, m_unused_element); also clears the collision counter.
void clear_async(cudaStream_t stream = 0)
{
  constexpr int block_size = 128;
  init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size, 0, stream>>>(
    m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element);
  if (count_collisions) m_collisions = 0;
}

unsigned long long get_num_collisions() const { return m_collisions; }

// Debug helper: dumps every bucket (index: key,value) to stdout.
// Assumes the table memory is host-accessible (e.g. managed memory).
void print()
{
  for (size_type i = 0; i < m_hashtbl_size; ++i) {
    std::cout << i << ": " << m_hashtbl_values[i].first << "," << m_hashtbl_values[i].second
              << std::endl;
  }
}

// Prefetches the table (when it is managed memory) and this object itself
// to device dev_id on `stream`.
int prefetch(const int dev_id, cudaStream_t stream = 0)
{
  cudaPointerAttributes hashtbl_values_ptr_attributes;
  cudaError_t status =
    cudaPointerGetAttributes(&hashtbl_values_ptr_attributes, m_hashtbl_values);

#if CUDART_VERSION >= 10000
  if (cudaSuccess == status && hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
  if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
  {
    CUDA_RT_CALL(cudaMemPrefetchAsync(
      m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, stream));
  }
  CUDA_RT_CALL(cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream));

  return 0;
}

// Atomically accumulates x.second into the existing value stored under
// x.first (via accum_existing_value_atomic). Returns end() when the key is
// not present; no insertion is performed.
template <class comparison_type     = key_equal,
          typename hash_value_type  = typename Hasher::result_type>
__forceinline__ __device__ const_iterator accum(const value_type& x,
                                                comparison_type keys_equal = key_equal(),
                                                bool precomputed_hash      = false,
                                                hash_value_type precomputed_hash_value = 0)
{
  const key_type& dst_key = x.first;
  auto it                 = find(dst_key);

  if (it == end()) {
    return it;
  }

  value_type* dst = it.getter();

  accum_existing_value_atomic(dst->second, x);

  return it;
}

private:
const hasher m_hf;                    // hash functor
const key_equal m_equal;              // key comparison functor
const mapped_type m_unused_element;   // sentinel value marking "not yet published"

allocator_type m_allocator;

size_type m_hashtbl_size;             // logical number of buckets
size_type m_hashtbl_capacity;         // allocated number of buckets (>= size)
value_type* m_hashtbl_values;         // bucket storage

unsigned long long m_collisions;
};

#endif  // CONCURRENT_UNORDERED_MAP_CUH
Jacobi2D-NaiveParallel-OMP_dyn.test.c
/****************************************************************************** * Jacobi2D benchmark * Basic parallelisation with OpenMP * * Usage: * make omp * export OMP_NUM_THREADS=8 * bin/Jacobi2D-NaiveParallel-OMP \ * `cat src/Jacobi2D-NaiveParallel-OMP.perfexecopts` * For a run on 8 threads ******************************************************************************/ #include <stdio.h> #include <omp.h> #include <time.h> #include <stdlib.h> #include <getopt.h> #include <stdbool.h> #include <ctype.h> #include <math.h> #include <assert.h> #define STENCIL(read,write,x,y) space[write][x][y] = \ ( space[read][x-1][y] +\ space[read][x][y] +\ space[read][x+1][y] +\ space[read][x][y+1] +\ space[read][x][y-1] )/5; #include "util.h" // main // Stages // 1 - command line parsing // 2 - data allocation and initialization // 3 - jacobi 1D timed within an openmp loop // 4 - output and optional verification int main( int argc, char* argv[] ){ // rather than calling fflush setbuf(stdout, NULL); // 1 - command line parsing Params cmdLineArgs; parseCmdLineArgs(&cmdLineArgs,argc,argv); // 2 - data allocation and initialization int lowerBound = 1; int upperBound = lowerBound + cmdLineArgs.problemSize - 1; double** space[2]; int i; // allocate x axis space[0] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*)); space[1] = (double**)malloc((cmdLineArgs.problemSize + 2) * sizeof(double*)); if( space[0] == NULL || space[1] == NULL ){ printf( "Could not allocate x axis of space array\n" ); exit(0); } // allocate y axis for( i = 0; i < cmdLineArgs.problemSize + 2; ++i ){ space[0][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double)); space[1][i]=(double*)malloc((cmdLineArgs.problemSize + 2) * sizeof(double)); if( space[0][i] == NULL || space[1][i] == NULL ){ printf( "Could not allocate y axis of space array\n" ); exit(0); } } // use global seed to seed the random number gen (will be constant) srand(cmdLineArgs.globalSeed); // first touch for openmp int x, 
y; #pragma omp parallel for private( x, y ) schedule(dynamic) for( x = lowerBound; x <= upperBound; ++x ){ for( y = lowerBound; y <= upperBound; ++y ){ space[0][x][y] = 0; } } // seed the space. for( x = lowerBound; x <= upperBound; ++x ){ for( y = lowerBound; y <= upperBound; ++y ){ space[0][x][y] = rand() / (double)rand(); } } // set halo values (sanity) for( i = 0; i < cmdLineArgs.problemSize + 2; ++i){ space[0][i][0] = 0; space[1][i][0] = 0; space[0][i][cmdLineArgs.problemSize + 1] = 0; space[1][i][cmdLineArgs.problemSize + 1] = 0; space[0][0][i] = 0; space[1][0][i] = 0; space[0][cmdLineArgs.problemSize + 1][i] = 0; space[1][cmdLineArgs.problemSize + 1][i] = 0; } // 3 - jacobi 2D timed within an openmp loop double start_time = omp_get_wtime(); int t,read=0,write=1; for( t = 1; t <= cmdLineArgs.T; ++t ){ #pragma omp parallel for private( x, y ) schedule(dynamic) for( x = lowerBound; x <= upperBound; ++x ){ for( y = lowerBound; y <= upperBound; ++y ){ STENCIL( read, write, x, y); } } read = write; write = 1 - write; } double end_time = omp_get_wtime(); double time = (end_time - start_time); // 4 - output and optional verification if( cmdLineArgs.printtime ){ /* printf( "Threads: %d, P: %d, ",cmdLineArgs.cores, cmdLineArgs.problemSize); */ printf( "Time: %f", time ); } if( cmdLineArgs.verify ){ if(!verifyResultJacobi2D(space[cmdLineArgs.T & 1],cmdLineArgs.problemSize, cmdLineArgs.globalSeed,cmdLineArgs.T )){ fprintf(stderr,"FAILURE\n"); }else{ fprintf(stderr,"SUCCESS\n"); } } }
convolution_winograd_transform_pack16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6,3) input transform, 16-float packed (AVX-512) layout:
// each 8x8 tile of 16-lane vectors is transformed row-wise into tmp[][][],
// then column-wise into bottom_blob_tm.
static void conv3x3s1_winograd63_transform_input_pack16_avx512(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // tiles of 6x6 outputs, each needing an 8x8 input window (stride 6)
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // 64-byte aligned scratch: _mm512_load/store_ps require alignment
#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[8][8][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * 16;

                // pass 1: transform the 8 rows of the tile into tmp
                for (int m = 0; m < 8; m++)
                {
                    __m512 _r00 = _mm512_load_ps(r0);
                    __m512 _r01 = _mm512_load_ps(r0 + 16);
                    __m512 _r02 = _mm512_load_ps(r0 + 16 * 2);
                    __m512 _r03 = _mm512_load_ps(r0 + 16 * 3);
                    __m512 _r04 = _mm512_load_ps(r0 + 16 * 4);
                    __m512 _r05 = _mm512_load_ps(r0 + 16 * 5);
                    __m512 _r06 = _mm512_load_ps(r0 + 16 * 6);
                    __m512 _r07 = _mm512_load_ps(r0 + 16 * 7);

                    __m512 _tmp0m = _mm512_fmadd_ps(_mm512_set1_ps(5.25f), _mm512_sub_ps(_r04, _r02), _mm512_sub_ps(_r00, _r06));
                    __m512 _tmp7m = _mm512_fmadd_ps(_mm512_set1_ps(5.25f), _mm512_sub_ps(_r03, _r05), _mm512_sub_ps(_r07, _r01));
                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[7][m], _tmp7m);

                    __m512 _tmp12a = _mm512_fmadd_ps(_mm512_set1_ps(-4.25f), _r04, _mm512_add_ps(_r02, _r06));
                    __m512 _tmp12b = _mm512_fmadd_ps(_mm512_set1_ps(-4.25f), _r03, _mm512_add_ps(_r01, _r05));

                    __m512 _tmp1m = _mm512_add_ps(_tmp12a, _tmp12b);
                    __m512 _tmp2m = _mm512_sub_ps(_tmp12a, _tmp12b);
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);

                    __m512 _tmp34a = _mm512_fmadd_ps(_mm512_set1_ps(-1.25f), _r04, _mm512_fmadd_ps(_mm512_set1_ps(0.25f), _r02, _r06));
                    __m512 _tmp34b = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _r05, _mm512_fmadd_ps(_mm512_set1_ps(-2.5f), _r03, _mm512_mul_ps(_r01, _mm512_set1_ps(0.5f))));

                    __m512 _tmp3m = _mm512_add_ps(_tmp34a, _tmp34b);
                    __m512 _tmp4m = _mm512_sub_ps(_tmp34a, _tmp34b);
                    _mm512_store_ps(tmp[3][m], _tmp3m);
                    _mm512_store_ps(tmp[4][m], _tmp4m);

                    __m512 _tmp56a = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _mm512_fmadd_ps(_mm512_set1_ps(-1.25f), _r04, _r02), _r06);
                    __m512 _tmp56b = _mm512_fmadd_ps(_mm512_set1_ps(0.5f), _r05, _mm512_fmadd_ps(_mm512_set1_ps(-2.5f), _r03, _mm512_mul_ps(_r01, _mm512_set1_ps(2.f))));

                    __m512 _tmp5m = _mm512_add_ps(_tmp56a, _tmp56b);
                    __m512 _tmp6m = _mm512_sub_ps(_tmp56a, _tmp56b);
                    _mm512_store_ps(tmp[5][m], _tmp5m);
                    _mm512_store_ps(tmp[6][m], _tmp6m);

                    r0 += w * 16;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 16;
                float* r0_tm_1 = r0_tm_0 + tiles * 16;
                float* r0_tm_2 = r0_tm_0 + tiles * 16 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 16 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 16 * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 16 * 5;
                float* r0_tm_6 = r0_tm_0 + tiles * 16 * 6;
                float* r0_tm_7 = r0_tm_0 + tiles * 16 * 7;

                // pass 2: same transform down the columns of tmp
                for (int m = 0; m < 8; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);
                    __m512 _tmp04 = _mm512_load_ps(tmp[m][4]);
                    __m512 _tmp05 = _mm512_load_ps(tmp[m][5]);
                    __m512 _tmp06 = _mm512_load_ps(tmp[m][6]);
                    __m512 _tmp07 = _mm512_load_ps(tmp[m][7]);

                    __m512 _r0tm0 = _mm512_fmadd_ps(_mm512_set1_ps(5.25f), _mm512_sub_ps(_tmp04, _tmp02), _mm512_sub_ps(_tmp00, _tmp06));
                    __m512 _r0tm7 = _mm512_fmadd_ps(_mm512_set1_ps(5.25f), _mm512_sub_ps(_tmp03, _tmp05), _mm512_sub_ps(_tmp07, _tmp01));

                    __m512 _tmp12a = _mm512_fmadd_ps(_mm512_set1_ps(-4.25f), _tmp04, _mm512_add_ps(_tmp02, _tmp06));
                    __m512 _tmp12b = _mm512_fmadd_ps(_mm512_set1_ps(-4.25f), _tmp03, _mm512_add_ps(_tmp01, _tmp05));

                    __m512 _r0tm1 = _mm512_add_ps(_tmp12a, _tmp12b);
                    __m512 _r0tm2 = _mm512_sub_ps(_tmp12a, _tmp12b);

                    __m512 _tmp34a = _mm512_fmadd_ps(_mm512_set1_ps(-1.25f), _tmp04, _mm512_fmadd_ps(_mm512_set1_ps(0.25f), _tmp02, _tmp06));
                    __m512 _tmp34b = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp05, _mm512_fmadd_ps(_mm512_set1_ps(-2.5f), _tmp03, _mm512_mul_ps(_tmp01, _mm512_set1_ps(0.5f))));

                    __m512 _r0tm3 = _mm512_add_ps(_tmp34a, _tmp34b);
                    __m512 _r0tm4 = _mm512_sub_ps(_tmp34a, _tmp34b);

                    __m512 _tmp56a = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _mm512_fmadd_ps(_mm512_set1_ps(-1.25f), _tmp04, _tmp02), _tmp06);
                    __m512 _tmp56b = _mm512_fmadd_ps(_mm512_set1_ps(0.5f), _tmp05, _mm512_fmadd_ps(_mm512_set1_ps(-2.5f), _tmp03, _mm512_mul_ps(_tmp01, _mm512_set1_ps(2.f))));

                    __m512 _r0tm5 = _mm512_add_ps(_tmp56a, _tmp56b);
                    __m512 _r0tm6 = _mm512_sub_ps(_tmp56a, _tmp56b);

                    _mm512_store_ps(r0_tm_0, _r0tm0);
                    _mm512_store_ps(r0_tm_1, _r0tm1);
                    _mm512_store_ps(r0_tm_2, _r0tm2);
                    _mm512_store_ps(r0_tm_3, _r0tm3);
                    _mm512_store_ps(r0_tm_4, _r0tm4);
                    _mm512_store_ps(r0_tm_5, _r0tm5);
                    _mm512_store_ps(r0_tm_6, _r0tm6);
                    _mm512_store_ps(r0_tm_7, _r0tm7);

                    // 8 coefficients per tile row x 16 lanes
                    r0_tm_0 += tiles * 128;
                    r0_tm_1 += tiles * 128;
                    r0_tm_2 += tiles * 128;
                    r0_tm_3 += tiles * 128;
                    r0_tm_4 += tiles * 128;
                    r0_tm_5 += tiles * 128;
                    r0_tm_6 += tiles * 128;
                    r0_tm_7 += tiles * 128;
                }
            }
        }
    }
}

// Winograd F(6,3) output transform: folds each 8x8 block of transformed
// coefficients back into a 6x6 output tile, adding the per-channel bias.
static void conv3x3s1_winograd63_transform_output_pack16_avx512(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f,-32.0f,  1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        __m512 _bias0 = biasptr ? _mm512_loadu_ps(biasptr + p * 16) : _mm512_setzero_ps();

#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[6][8][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 16;
                const float* output0_tm_1 = output0_tm_0 + tiles * 16;
                const float* output0_tm_2 = output0_tm_0 + tiles * 16 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 16 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16 * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 16 * 5;
                const float* output0_tm_6 = output0_tm_0 + tiles * 16 * 6;
                const float* output0_tm_7 = output0_tm_0 + tiles * 16 * 7;

                float* output0 = out0.row(i * 6) + (j * 6) * 16;

                // pass 1: row transform of the 8 coefficient rows into tmp
                for (int m = 0; m < 8; m++)
                {
                    __m512 _out0tm0 = _mm512_load_ps(output0_tm_0);
                    __m512 _out0tm1 = _mm512_load_ps(output0_tm_1);
                    __m512 _out0tm2 = _mm512_load_ps(output0_tm_2);
                    __m512 _out0tm3 = _mm512_load_ps(output0_tm_3);
                    __m512 _out0tm4 = _mm512_load_ps(output0_tm_4);
                    __m512 _out0tm5 = _mm512_load_ps(output0_tm_5);
                    __m512 _out0tm6 = _mm512_load_ps(output0_tm_6);
                    __m512 _out0tm7 = _mm512_load_ps(output0_tm_7);

                    __m512 _tmp024a = _mm512_add_ps(_out0tm1, _out0tm2);
                    __m512 _tmp135a = _mm512_sub_ps(_out0tm1, _out0tm2);

                    __m512 _tmp024b = _mm512_add_ps(_out0tm3, _out0tm4);
                    __m512 _tmp135b = _mm512_sub_ps(_out0tm3, _out0tm4);

                    __m512 _tmp024c = _mm512_add_ps(_out0tm5, _out0tm6);
                    __m512 _tmp135c = _mm512_sub_ps(_out0tm5, _out0tm6);

                    __m512 _tmp0m = _mm512_add_ps(_mm512_add_ps(_out0tm0, _tmp024a), _mm512_fmadd_ps(_mm512_set1_ps(32.f), _tmp024c, _tmp024b));
                    __m512 _tmp2m = _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp024c, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp024b, _tmp024a));
                    __m512 _tmp4m = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp024c, _mm512_fmadd_ps(_mm512_set1_ps(16.f), _tmp024b, _tmp024a));
                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);
                    _mm512_store_ps(tmp[4][m], _tmp4m);

                    __m512 _tmp1m = _mm512_fmadd_ps(_mm512_set1_ps(16.f), _tmp135c, _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp135b, _tmp135a));
                    __m512 _tmp3m = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp135c, _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp135b, _tmp135a));
                    __m512 _tmp5m = _mm512_add_ps(_mm512_add_ps(_out0tm7, _tmp135a), _mm512_fmadd_ps(_mm512_set1_ps(32.f), _tmp135b, _tmp135c));
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[3][m], _tmp3m);
                    _mm512_store_ps(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 128;
                    output0_tm_1 += tiles * 128;
                    output0_tm_2 += tiles * 128;
                    output0_tm_3 += tiles * 128;
                    output0_tm_4 += tiles * 128;
                    output0_tm_5 += tiles * 128;
                    output0_tm_6 += tiles * 128;
                    output0_tm_7 += tiles * 128;
                }

                // pass 2: column transform of tmp into the 6x6 output tile (+bias)
                for (int m = 0; m < 6; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);
                    __m512 _tmp04 = _mm512_load_ps(tmp[m][4]);
                    __m512 _tmp05 = _mm512_load_ps(tmp[m][5]);
                    __m512 _tmp06 = _mm512_load_ps(tmp[m][6]);
                    __m512 _tmp07 = _mm512_load_ps(tmp[m][7]);

                    __m512 _tmp024a = _mm512_add_ps(_tmp01, _tmp02);
                    __m512 _tmp135a = _mm512_sub_ps(_tmp01, _tmp02);

                    __m512 _tmp024b = _mm512_add_ps(_tmp03, _tmp04);
                    __m512 _tmp135b = _mm512_sub_ps(_tmp03, _tmp04);

                    __m512 _tmp024c = _mm512_add_ps(_tmp05, _tmp06);
                    __m512 _tmp135c = _mm512_sub_ps(_tmp05, _tmp06);

                    __m512 _out00 = _mm512_add_ps(_bias0, _mm512_add_ps(_mm512_add_ps(_tmp00, _tmp024a), _mm512_fmadd_ps(_mm512_set1_ps(32.f), _tmp024c, _tmp024b)));
                    __m512 _out02 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp024c, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp024b, _tmp024a)));
                    __m512 _out04 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp024c, _mm512_fmadd_ps(_mm512_set1_ps(16.f), _tmp024b, _tmp024a)));
                    _mm512_store_ps(output0, _out00);
                    _mm512_store_ps(output0 + 32, _out02);
                    _mm512_store_ps(output0 + 64, _out04);

                    __m512 _out01 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(16.f), _tmp135c, _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp135b, _tmp135a)));
                    __m512 _out03 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp135c, _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp135b, _tmp135a)));
                    __m512 _out05 = _mm512_add_ps(_bias0, _mm512_add_ps(_mm512_add_ps(_tmp07, _tmp135a), _mm512_fmadd_ps(_mm512_set1_ps(32.f), _tmp135b, _tmp135c)));
                    _mm512_store_ps(output0 + 16, _out01);
                    _mm512_store_ps(output0 + 48, _out03);
                    _mm512_store_ps(output0 + 80, _out05);

                    output0 += outw * 16;
                }
            }
        }
    }
}

// Winograd F(4,3) input transform: each 6x6 tile of 16-lane vectors becomes
// 36 transformed coefficients (stride-4 tiling of the input).
static void conv3x3s1_winograd43_transform_input_pack16_avx512(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // NOTE: matrix is 6x6 (was mislabelled itm[4][4])
    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[6][6][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * 16;

                // pass 1: rows
                for (int m = 0; m < 6; m++)
                {
                    __m512 _r00 = _mm512_load_ps(r0);
                    __m512 _r01 = _mm512_load_ps(r0 + 16);
                    __m512 _r02 = _mm512_load_ps(r0 + 16 * 2);
                    __m512 _r03 = _mm512_load_ps(r0 + 16 * 3);
                    __m512 _r04 = _mm512_load_ps(r0 + 16 * 4);
                    __m512 _r05 = _mm512_load_ps(r0 + 16 * 5);

                    __m512 _tmp0m = _mm512_fmadd_ps(_mm512_set1_ps(-5.f), _r02, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _r00, _r04));
                    __m512 _tmp1m = _mm512_fmadd_ps(_mm512_set1_ps(-4.f), _mm512_add_ps(_r01, _r02), _mm512_add_ps(_r04, _r03));
                    __m512 _tmp2m = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _mm512_sub_ps(_r01, _r02), _mm512_sub_ps(_r04, _r03));
                    __m512 _tmp3m = _mm512_fmadd_ps(_mm512_set1_ps(-2.f), _mm512_sub_ps(_r01, _r03), _mm512_sub_ps(_r04, _r02));
                    __m512 _tmp4m = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _mm512_sub_ps(_r01, _r03), _mm512_sub_ps(_r04, _r02));
                    __m512 _tmp5m = _mm512_fmadd_ps(_mm512_set1_ps(-5.f), _r03, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _r01, _r05));

                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);
                    _mm512_store_ps(tmp[3][m], _tmp3m);
                    _mm512_store_ps(tmp[4][m], _tmp4m);
                    _mm512_store_ps(tmp[5][m], _tmp5m);

                    r0 += w * 16;
                }

                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 16;
                float* r0_tm_1 = r0_tm_0 + tiles * 16;
                float* r0_tm_2 = r0_tm_0 + tiles * 16 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 16 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 16 * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 16 * 5;

                // pass 2: columns
                for (int m = 0; m < 6; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);
                    __m512 _tmp04 = _mm512_load_ps(tmp[m][4]);
                    __m512 _tmp05 = _mm512_load_ps(tmp[m][5]);

                    __m512 _r0tm0 = _mm512_fmadd_ps(_mm512_set1_ps(-5.f), _tmp02, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp00, _tmp04));
                    __m512 _r0tm1 = _mm512_fmadd_ps(_mm512_set1_ps(-4.f), _mm512_add_ps(_tmp01, _tmp02), _mm512_add_ps(_tmp04, _tmp03));
                    __m512 _r0tm2 = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _mm512_sub_ps(_tmp01, _tmp02), _mm512_sub_ps(_tmp04, _tmp03));
                    __m512 _r0tm3 = _mm512_fmadd_ps(_mm512_set1_ps(-2.f), _mm512_sub_ps(_tmp01, _tmp03), _mm512_sub_ps(_tmp04, _tmp02));
                    __m512 _r0tm4 = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _mm512_sub_ps(_tmp01, _tmp03), _mm512_sub_ps(_tmp04, _tmp02));
                    __m512 _r0tm5 = _mm512_fmadd_ps(_mm512_set1_ps(-5.f), _tmp03, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp01, _tmp05));

                    _mm512_store_ps(r0_tm_0, _r0tm0);
                    _mm512_store_ps(r0_tm_1, _r0tm1);
                    _mm512_store_ps(r0_tm_2, _r0tm2);
                    _mm512_store_ps(r0_tm_3, _r0tm3);
                    _mm512_store_ps(r0_tm_4, _r0tm4);
                    _mm512_store_ps(r0_tm_5, _r0tm5);

                    // 6 coefficients per tile row x 16 lanes
                    r0_tm_0 += tiles * 96;
                    r0_tm_1 += tiles * 96;
                    r0_tm_2 += tiles * 96;
                    r0_tm_3 += tiles * 96;
                    r0_tm_4 += tiles * 96;
                    r0_tm_5 += tiles * 96;
                }
            }
        }
    }
}

// Winograd F(4,3) output transform: folds each 6x6 coefficient block back
// into a 4x4 output tile, adding the per-channel bias.
static void conv3x3s1_winograd43_transform_output_pack16_avx512(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        __m512 _bias0 = biasptr ? _mm512_loadu_ps(biasptr + p * 16) : _mm512_setzero_ps();

#ifdef _MSC_VER
        __declspec(align(64))
#else
        __attribute__((aligned(64)))
#endif
        float tmp[4][6][16];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 16;
                const float* output0_tm_1 = output0_tm_0 + tiles * 16;
                const float* output0_tm_2 = output0_tm_0 + tiles * 16 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 16 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16 * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 16 * 5;

                float* output0 = out0.row(i * 4) + (j * 4) * 16;

                // pass 1: rows
                for (int m = 0; m < 6; m++)
                {
                    __m512 _out0tm0 = _mm512_load_ps(output0_tm_0);
                    __m512 _out0tm1 = _mm512_load_ps(output0_tm_1);
                    __m512 _out0tm2 = _mm512_load_ps(output0_tm_2);
                    __m512 _out0tm3 = _mm512_load_ps(output0_tm_3);
                    __m512 _out0tm4 = _mm512_load_ps(output0_tm_4);
                    __m512 _out0tm5 = _mm512_load_ps(output0_tm_5);

                    __m512 _tmp02a = _mm512_add_ps(_out0tm1, _out0tm2);
                    __m512 _tmp13a = _mm512_sub_ps(_out0tm1, _out0tm2);

                    __m512 _tmp02b = _mm512_add_ps(_out0tm3, _out0tm4);
                    __m512 _tmp13b = _mm512_sub_ps(_out0tm3, _out0tm4);

                    __m512 _tmp0m = _mm512_add_ps(_mm512_add_ps(_out0tm0, _tmp02a), _tmp02b);
                    __m512 _tmp1m = _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp13b, _tmp13a);
                    __m512 _tmp2m = _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp02b, _tmp02a);
                    __m512 _tmp3m = _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp13b, _mm512_add_ps(_out0tm5, _tmp13a));

                    _mm512_store_ps(tmp[0][m], _tmp0m);
                    _mm512_store_ps(tmp[1][m], _tmp1m);
                    _mm512_store_ps(tmp[2][m], _tmp2m);
                    _mm512_store_ps(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 96;
                    output0_tm_1 += tiles * 96;
                    output0_tm_2 += tiles * 96;
                    output0_tm_3 += tiles * 96;
                    output0_tm_4 += tiles * 96;
                    output0_tm_5 += tiles * 96;
                }

                // pass 2: columns (+bias)
                for (int m = 0; m < 4; m++)
                {
                    __m512 _tmp00 = _mm512_load_ps(tmp[m][0]);
                    __m512 _tmp01 = _mm512_load_ps(tmp[m][1]);
                    __m512 _tmp02 = _mm512_load_ps(tmp[m][2]);
                    __m512 _tmp03 = _mm512_load_ps(tmp[m][3]);
                    __m512 _tmp04 = _mm512_load_ps(tmp[m][4]);
                    __m512 _tmp05 = _mm512_load_ps(tmp[m][5]);

                    __m512 _tmp02a = _mm512_add_ps(_tmp01, _tmp02);
                    __m512 _tmp13a = _mm512_sub_ps(_tmp01, _tmp02);

                    __m512 _tmp02b = _mm512_add_ps(_tmp03, _tmp04);
                    __m512 _tmp13b = _mm512_sub_ps(_tmp03, _tmp04);

                    __m512 _out00 = _mm512_add_ps(_bias0, _mm512_add_ps(_mm512_add_ps(_tmp00, _tmp02a), _tmp02b));
                    __m512 _out01 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(2.f), _tmp13b, _tmp13a));
                    __m512 _out02 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(4.f), _tmp02b, _tmp02a));
                    __m512 _out03 = _mm512_add_ps(_bias0, _mm512_fmadd_ps(_mm512_set1_ps(8.f), _tmp13b, _mm512_add_ps(_tmp05, _tmp13a)));

                    _mm512_store_ps(output0, _out00);
                    _mm512_store_ps(output0 + 16, _out01);
                    _mm512_store_ps(output0 + 16 * 2, _out02);
                    _mm512_store_ps(output0 + 16 * 3, _out03);

                    output0 += outw * 16;
                }
            }
        }
    }
}
zero_omp.c
/* * File: zero_omp.c * Author: Philip Mucci * mucci@cs.utk.edu * Mods: Nils Smeds * smeds@pdc.kth.se * Anders Nilsson * anni@pdc.kth.se */ /* This file performs the following test: start, stop and timer functionality for 2 slave OMP threads - It attempts to use the following two counters. It may use less depending on hardware counter resource limitations. These are counted in the default counting domain and default granularity, depending on the platform. Usually this is the user domain (PAPI_DOM_USER) and thread context (PAPI_GRN_THR). + PAPI_FP_INS + PAPI_TOT_CYC Each thread inside the Thread routine: - Get cyc. - Get us. - Start counters - Do flops - Stop and read counters - Get us. - Get cyc. Master serial thread: - Get us. - Get cyc. - Run parallel for loop - Get us. - Get cyc. */ #include <stdio.h> #include <stdlib.h> #include "papi.h" #include "papi_test.h" #include "do_loops.h" #ifdef _OPENMP #include <omp.h> #else #error "This compiler does not understand OPENMP" #endif const PAPI_hw_info_t *hw_info = NULL; void Thread( int n ) { int retval, num_tests = 1; int EventSet1 = PAPI_NULL; int PAPI_event, mask1; int num_events1; long long **values; long long elapsed_us, elapsed_cyc; char event_name[PAPI_MAX_STR_LEN]; if (!TESTS_QUIET) { printf( "Thread %#x started\n", omp_get_thread_num( ) ); } /* add PAPI_TOT_CYC and one of the events in PAPI_FP_INS, PAPI_FP_OPS or PAPI_TOT_INS, depending on the availability of the event on the platform */ EventSet1 = add_two_events( &num_events1, &PAPI_event, &mask1 ); if (num_events1==0) { if (!TESTS_QUIET) printf("No events added!\n"); test_fail(__FILE__,__LINE__,"No events",0); } retval = PAPI_event_code_to_name( PAPI_event, event_name ); if ( retval != PAPI_OK ) test_fail( __FILE__, __LINE__, "PAPI_event_code_to_name", retval ); values = allocate_test_space( num_tests, num_events1 ); elapsed_us = PAPI_get_real_usec( ); elapsed_cyc = PAPI_get_real_cyc( ); retval = PAPI_start( EventSet1 ); if ( retval != PAPI_OK ) test_fail( 
__FILE__, __LINE__, "PAPI_start", retval ); do_flops( n ); retval = PAPI_stop( EventSet1, values[0] ); if ( retval != PAPI_OK ) test_fail( __FILE__, __LINE__, "PAPI_stop", retval ); elapsed_us = PAPI_get_real_usec( ) - elapsed_us; elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc; remove_test_events( &EventSet1, mask1 ); if ( !TESTS_QUIET ) { printf( "Thread %#x %-12s : \t%lld\n", omp_get_thread_num( ), event_name, values[0][1] ); printf( "Thread %#x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num( ), values[0][0] ); printf( "Thread %#x Real usec : \t%lld\n", omp_get_thread_num( ), elapsed_us ); printf( "Thread %#x Real cycles : \t%lld\n", omp_get_thread_num( ), elapsed_cyc ); } /* It is illegal for the threads to exit in OpenMP */ /* test_pass(__FILE__,0,0); */ free_test_space( values, num_tests ); PAPI_unregister_thread( ); if (!TESTS_QUIET) { printf( "Thread %#x finished\n", omp_get_thread_num( ) ); } } unsigned long omp_get_thread_num_wrapper(void){ return (unsigned long)omp_get_thread_num(); } int main( int argc, char **argv ) { int retval; long long elapsed_us, elapsed_cyc; int quiet; /* Set TESTS_QUIET variable */ quiet = tests_quiet( argc, argv ); retval = PAPI_library_init( PAPI_VER_CURRENT ); if ( retval != PAPI_VER_CURRENT ) { test_fail( __FILE__, __LINE__, "PAPI_library_init", retval ); } hw_info = PAPI_get_hardware_info( ); if ( hw_info == NULL ) { test_fail( __FILE__, __LINE__, "PAPI_get_hardware_info", 2 ); } if (PAPI_query_event(PAPI_TOT_INS)!=PAPI_OK) { if (!quiet) printf("Can't find PAPI_TOT_INS\n"); test_skip(__FILE__,__LINE__,"Event missing",1); } if (PAPI_query_event(PAPI_TOT_CYC)!=PAPI_OK) { if (!quiet) printf("Can't find PAPI_TOT_CYC\n"); test_skip(__FILE__,__LINE__,"Event missing",1); } elapsed_us = PAPI_get_real_usec( ); elapsed_cyc = PAPI_get_real_cyc( ); retval = PAPI_thread_init( omp_get_thread_num_wrapper ); if ( retval != PAPI_OK ) { if ( retval == PAPI_ECMP ) { if (!quiet) printf("Trouble init threads\n"); test_skip( __FILE__, __LINE__, 
"PAPI_thread_init", retval ); } else { test_fail( __FILE__, __LINE__, "PAPI_thread_init", retval ); } } #pragma omp parallel { Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) ); } omp_set_num_threads( 1 ); Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) ); omp_set_num_threads( omp_get_max_threads( ) ); #pragma omp parallel { Thread( 1000000 * ( omp_get_thread_num( ) + 1 ) ); } elapsed_cyc = PAPI_get_real_cyc( ) - elapsed_cyc; elapsed_us = PAPI_get_real_usec( ) - elapsed_us; if ( !TESTS_QUIET ) { printf( "Master real usec : \t%lld\n", elapsed_us ); printf( "Master real cycles : \t%lld\n", elapsed_cyc ); } test_pass( __FILE__ ); return 0; }
omp_task_shared.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" /* Utility function do spend some time in a loop */ int test_omp_task_imp_shared() { int i; int k = 0; int result = 0; i=0; #pragma omp parallel { #pragma omp single for (k = 0; k < NUM_TASKS; k++) { #pragma omp task shared(i) { #pragma omp atomic i++; //this should be shared implicitly } } } result = i; return ((result == NUM_TASKS)); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_task_imp_shared()) { num_failed++; } } return num_failed; }
GB_unop.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply
// op(A') function: GB_unop_tran

// C type: GB_ctype
// A type: GB_atype
// cast: GB_cast(cij,aij)
// unaryop: GB_unaryop(cij,aij)

// NOTE(review): GB_ctype, GB_atype, GB_geta, GB_cast, GB_unaryop, and the
// other GB_* tokens below are placeholders that the code generator replaces
// with concrete types and expressions for each built-in unary operator.

#define GB_ATYPE \
    GB_atype

#define GB_CTYPE \
    GB_ctype

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GB_geta(aij,Ax,pA)

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    GB_unaryop(z, x) ;

// casting
#define GB_CAST(z, aij) \
    GB_cast(z, aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_geta(aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_cast(z, aij) ; \
    GB_unaryop(Cx [pC], z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    GB_op_is_identity_with_no_typecast

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    GB_disable

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the unary operator entrywise to the anz entries of Ax, writing the
// result into Cx.  If Ab is NULL, A is held as a full array and every entry is
// processed; otherwise Ab is A's bitmap and only positions with Ab[p] != 0 are
// computed.  Work is split across nthreads OpenMP threads.
GrB_Info GB_unop_apply
(
    GB_ctype *Cx,               // Cx and Ax may be aliased
    const GB_atype *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, same types: the apply is a plain parallel memcpy
        GB_memcpy (Cx, Ax, anz * sizeof (GB_atype), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GB_geta(aij, Ax, p) ;
            GB_cast(z, aij) ;
            GB_unaryop(Cx [p], z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            GB_geta(aij, Ax, p) ;
            GB_cast(z, aij) ;
            GB_unaryop(Cx [p], z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unop_transpose.c, which is
// textually included here and driven by the GB_* macros defined above.
// Workspaces and A_slice partition A's entries across the nthreads threads.
GrB_Info GB_unop_tran
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
target-8.c
/* { dg-do run } */

/* Bump each of the 24 elements of p by one, offloading the loop to a
   target device from inside a host parallel/single region. */
void foo (int *p)
{
  int i;
#pragma omp parallel
#pragma omp single
#pragma omp target teams distribute parallel for map(p[0:24])
  for (i = 0; i < 24; i++)
    ++p[i];
}

int main ()
{
  int p[24];
  int i;

  for (i = 0; i < 24; i++)
    p[i] = i;

  foo (p);

  /* Every element must have been incremented exactly once. */
  for (i = 0; i < 24; i++)
    if (p[i] != i + 1)
      __builtin_abort ();

  return 0;
}
GB_binop__rminus_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rminus_fp32 // A.*B function (eWiseMult): GB_AemultB__rminus_fp32 // A*D function (colscale): GB_AxD__rminus_fp32 // D*A function (rowscale): GB_DxB__rminus_fp32 // C+=B function (dense accum): GB_Cdense_accumB__rminus_fp32 // C+=b function (dense accum): GB_Cdense_accumb__rminus_fp32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_fp32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_fp32 // C=scalar+B GB_bind1st__rminus_fp32 // C=scalar+B' GB_bind1st_tran__rminus_fp32 // C=A+scalar GB_bind2nd__rminus_fp32 // C=A'+scalar GB_bind2nd_tran__rminus_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t 
// cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rminus_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rminus_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rminus_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rminus_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rminus_fp32 ( GrB_Matrix C, 
const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rminus_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__rminus_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = 
NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rminus_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rminus_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rminus_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB_bind1st_tran__rminus_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB_bind2nd_tran__rminus_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
invertir_para.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define num_threads 12

/*
 * Reads f10.bmp, converts it to grayscale, mirrors each row horizontally,
 * applies an 11x11 box blur, and writes the result to f10para.bmp.  The
 * three pixel passes are parallelized with OpenMP.
 *
 * Fixes vs. the previous version:
 *  - the gray pass no longer shares r/g/b and a racy `count` across
 *    threads; BMP row padding is handled with per-row indexing instead
 *    of a counter that breaks under `parallel for`;
 *  - the mirror pass had an off-by-one (source index started at ancho*3,
 *    one byte past the last pixel of the row, reading padding);
 *  - the blur accumulator is now a double (the old unsigned char
 *    accumulator truncated every value/121 term to 0..2 and wrapped
 *    mod 256);
 *  - blur neighbourhood indices are clamped to the image (the old code
 *    read out of bounds at the borders — undefined behavior);
 *  - pixel data is read with one fread instead of a feof() loop, fopen
 *    results are checked, and the buffers are freed.
 */
int main()
{
    FILE *image = fopen("f10.bmp", "rb");           /* source image */
    FILE *outputImage = fopen("f10para.bmp", "wb"); /* transformed image */
    if (image == NULL || outputImage == NULL) {
        fprintf(stderr, "error: could not open input or output image\n");
        if (image) fclose(image);
        if (outputImage) fclose(outputImage);
        return 1;
    }

    double t1 = omp_get_wtime();
    omp_set_num_threads(num_threads);

    /* Copy the 54-byte BMP header verbatim to the output file. */
    unsigned char header[54];
    for (int i = 0; i < 54; i++) {
        header[i] = (unsigned char)fgetc(image);
        fputc(header[i], outputImage);
    }

    /* Width and height in pixels (little-endian ints at offsets 18 and 22). */
    long ancho = (long)header[20] * 65536 + (long)header[19] * 256 + (long)header[18];
    long alto  = (long)header[24] * 65536 + (long)header[23] * 256 + (long)header[22];

    /* BMP rows are padded to a multiple of 4 bytes; n = pad bytes per row. */
    int n = 0;
    if ((ancho * 3) % 4 != 0) {
        n = (int)((((ancho * 3) / 4) * 4 + 4) - ancho * 3);
    }
    long stride = ancho * 3 + n;   /* bytes per row, padding included */

    printf("largo img %li\n", alto);
    printf("ancho img %li\n", ancho);

    /* Same (oversized) allocation as the original; calloc so any tail the
       file does not fill is zeroed instead of read uninitialized. */
    size_t buf_size = (size_t)(ancho + n) * alto * 3;
    unsigned char *arr_in   = calloc(buf_size, 1);
    unsigned char *arr_out  = calloc(buf_size, 1);
    unsigned char *arr_blur = calloc(buf_size, 1);
    if (arr_in == NULL || arr_out == NULL || arr_blur == NULL) {
        fprintf(stderr, "error: out of memory\n");
        free(arr_in);
        free(arr_out);
        free(arr_blur);
        fclose(image);
        fclose(outputImage);
        return 1;
    }

    /* Read all pixel data at once (replaces the feof() loop, which read
       one extra byte past end of file). */
    size_t nbytes = fread(arr_in, 1, buf_size, image);
    printf("%d\n", (int)nbytes);
    for (size_t k = 0; k < buf_size; k++) {   /* keep padding bytes in output */
        arr_out[k] = arr_in[k];
        arr_blur[k] = arr_in[k];
    }

    /* Pass 1: grayscale, luminance = 0.21 R + 0.72 G + 0.07 B.  Rows are
       independent, so the row loop parallelizes with no shared state. */
#pragma omp parallel for schedule(static)
    for (long row = 0; row < alto; row++) {
        for (long col = 0; col < ancho * 3; col += 3) {
            long idx = row * stride + col;
            unsigned char b = arr_in[idx];       /* BMP stores BGR */
            unsigned char g = arr_in[idx + 1];
            unsigned char r = arr_in[idx + 2];
            unsigned char pixel = (unsigned char)(0.21 * r + 0.72 * g + 0.07 * b);
            arr_in[idx]     = pixel;
            arr_in[idx + 1] = pixel;
            arr_in[idx + 2] = pixel;
        }
    }
    printf("FINISH GRAY CONVERT\n");

    /* Pass 2: mirror each row horizontally.  Pixel at col swaps with the
       pixel starting at ancho*3 - 3 - col (NOT ancho*3 - col, which is one
       byte past the row's pixels). */
#pragma omp parallel for schedule(static)
    for (long row = 0; row < alto; row++) {
        for (long col = 0; col < ancho * 3; col += 3) {
            long dst = row * stride + col;
            long src = row * stride + (ancho * 3 - 3 - col);
            arr_out[dst]     = arr_in[src];
            arr_out[dst + 1] = arr_in[src + 1];
            arr_out[dst + 2] = arr_in[src + 2];
        }
    }
    printf("FINISH SHIFT CONVERT\n");

    /* Pass 3: mask x mask box blur over the mirrored grayscale image.
       Border neighbours are clamped to the nearest valid pixel. */
    int mask = 11;
#pragma omp parallel for schedule(guided)
    for (long row = 0; row < alto; row++) {
        for (long col = 0; col < ancho; col++) {
            double acc = 0.0;
            for (int dr = -(mask / 2); dr <= mask / 2; dr++) {
                for (int dc = -(mask / 2); dc <= mask / 2; dc++) {
                    long rr = row + dr;
                    long cc = col + dc;
                    if (rr < 0) rr = 0;
                    if (rr >= alto) rr = alto - 1;
                    if (cc < 0) cc = 0;
                    if (cc >= ancho) cc = ancho - 1;
                    /* grayscale: any of the three channels works */
                    acc += arr_out[rr * stride + cc * 3] / (double)(mask * mask);
                }
            }
            long idx = row * stride + col * 3;
            unsigned char v = (unsigned char)acc;
            arr_blur[idx]     = v;
            arr_blur[idx + 1] = v;
            arr_blur[idx + 2] = v;
        }
    }
    printf("FINISH BLUR CONVERT\n");

    /* Write exactly alto rows of stride bytes (the old code wrote the whole
       oversized buffer, making the output file larger than the input). */
    fwrite(arr_blur, 1, (size_t)stride * alto, outputImage);

    double t2 = omp_get_wtime();
    printf("%lf segundos\n", t2 - t1);

    free(arr_in);
    free(arr_out);
    free(arr_blur);
    fclose(image);
    fclose(outputImage);
    return 0;
}
openmp-ex25.c
#include <stdio.h>
#include <unistd.h>
#include <omp.h>

int main(void)
{
  int num_threads;
  int fib = 1;
  int fib_prev = 0;

#pragma omp parallel
  {
    /* Computing the Fibonacci number of the thread count is trivial, but it
     * stands in for real serial work (typically I/O, or work on state the
     * threads have built up that can't or shouldn't be recomputed) that is
     * inconvenient to move outside the parallel block. */
#pragma omp single
    {
      int step;

      num_threads = omp_get_num_threads();
      for (step = 2; step <= num_threads; step++) {
        int fib_next = fib + fib_prev;
        fib_prev = fib;
        fib = fib_next;
      }
      printf("fib(num_threads = %d) = %d\n",num_threads,fib);
    }
  }
  return 0;
}
tetrahedron_method.c
/* Copyright (C) 2014 Atsushi Togo */ /* All rights reserved. */ /* This file was originally part of spglib and is part of kspclib. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ /* tetrahedron_method.c */ /* Copyright (C) 2014 Atsushi Togo */ #include <stddef.h> #include <kgrid.h> #include "tetrahedron_method.h" #ifdef THMWARNING #include <stdio.h> #define warning_print(...) fprintf(stderr,__VA_ARGS__) #else #define warning_print(...) 
#endif /* 6-------7 */ /* /| /| */ /* / | / | */ /* 4-------5 | */ /* | 2----|--3 */ /* | / | / */ /* |/ |/ */ /* 0-------1 */ /* */ /* i: vec neighbours */ /* 0: O 1, 2, 4 */ /* 1: a 0, 3, 5 */ /* 2: b 0, 3, 6 */ /* 3: a + b 1, 2, 7 */ /* 4: c 0, 5, 6 */ /* 5: c + a 1, 4, 7 */ /* 6: c + b 2, 4, 7 */ /* 7: c + a + b 3, 5, 6 */ static int main_diagonals[4][3] = {{ 1, 1, 1}, /* 0-7 */ {-1, 1, 1}, /* 1-6 */ { 1,-1, 1}, /* 2-5 */ { 1, 1,-1}}; /* 3-4 */ static int db_relative_grid_address[4][24][4][3] = { { { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 1, 1, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 1, 0, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, { 1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, { 1, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, { 0, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, {-1, -1, -1}, { 0, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, 0, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, -1}, {-1, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 0, 1, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, {-1, 1, 0}, {-1, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, 
{-1, 0, 1}, {-1, 1, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, {-1, 1, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 1, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, 0, 1}, {-1, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, {-1, 1, 1}, { 0, 1, 1}, }, { { 0, 0, 0}, { 0, 0, 1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, 1}, { 1, -1, 0}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 0, 1}, { 0, 0, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 0, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 1, 0, -1}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, -1, -1}, { 1, -1, -1}, { 0, 0, -1}, }, { { 0, 0, 0}, { 0, -1, -1}, { 1, -1, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, -1, -1}, { 0, 0, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, -1}, { 1, 0, -1}, }, { { 0, 0, 0}, { 1, -1, -1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, -1}, { 1, -1, 0}, }, { { 0, 0, 0}, { 0, -1, -1}, { 0, 0, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, -1, -1}, { 0, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 0, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, -1, 1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 0, -1, 1}, { 1, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, 1}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, -1, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, -1, 1}, { 1, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, -1, 1}, { 0, 0, 1}, { 1, 0, 1}, }, { { 0, 0, 0}, { 0, -1, 1}, { 0, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, -1, 1}, { 0, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, 0, -1}, {-1, 1, -1}, }, { { 0, 0, 0}, {-1, 0, -1}, {-1, 1, -1}, 
{-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, 1, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 1, -1}, { 0, 1, -1}, }, { { 0, 0, 0}, {-1, 1, 0}, {-1, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, 1, 0}, { 0, 1, 0}, {-1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, -1, 0}, { 1, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, 0, -1}, { 1, -1, 0}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, 0, -1}, { 0, -1, 0}, {-1, 0, 0}, }, }, { { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, {-1, 0, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 0, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, -1, 0}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, { 0, -1, 1}, { 0, -1, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, 0, 1}, {-1, 0, 0}, }, { { 0, 0, 0}, {-1, -1, 1}, { 0, -1, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, {-1, -1, 1}, {-1, 0, 1}, { 0, 0, 1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 1, 0, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, { 1, 1, -1}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 1, 0}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 1, 0}, { 1, 1, 0}, { 1, 1, -1}, }, { { 0, 0, 0}, { 0, 0, -1}, { 0, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 1, 0}, { 0, 1, -1}, {-1, 0, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, { 1, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 1, 0, 0}, { 1, 0, -1}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, -1, 0}, { 0, -1, 0}, }, { { 0, 0, 0}, { 0, 0, -1}, {-1, -1, 0}, {-1, 0, 0}, }, }, }; static void get_integration_weight_at_omegas(double *integration_weights, const int num_omegas, const double *omegas, THMCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), 
double (*IJ)(const int, const int, const double, const double[4])); static double get_integration_weight(const double omega, THMCONST double tetrahedra_omegas[24][4], double (*gn)(const int, const double, const double[4]), double (*IJ)(const int, const int, const double, const double[4])); static int get_main_diagonal(THMCONST double rec_lattice[3][3]); static int sort_omegas(double v[4]); static double norm_squared_d3(const double a[3]); static void multiply_matrix_vector_di3(double v[3], THMCONST double a[3][3], const int b[3]); static double _f(const int n, const int m, const double omega, const double vertices_omegas[4]); static double _J(const int i, const int ci, const double omega, const double vertices_omegas[4]); static double _I(const int i, const int ci, const double omega, const double vertices_omegas[4]); static double _n(const int i, const double omega, const double vertices_omegas[4]); static double _g(const int i, const double omega, const double vertices_omegas[4]); static double _n_0(void); static double _n_1(const double omega, const double vertices_omegas[4]); static double _n_2(const double omega, const double vertices_omegas[4]); static double _n_3(const double omega, const double vertices_omegas[4]); static double _n_4(void); static double _g_0(void); static double _g_1(const double omega, const double vertices_omegas[4]); static double _g_2(const double omega, const double vertices_omegas[4]); static double _g_3(const double omega, const double vertices_omegas[4]); static double _g_4(void); static double _J_0(void); static double _J_10(const double omega, const double vertices_omegas[4]); static double _J_11(const double omega, const double vertices_omegas[4]); static double _J_12(const double omega, const double vertices_omegas[4]); static double _J_13(const double omega, const double vertices_omegas[4]); static double _J_20(const double omega, const double vertices_omegas[4]); static double _J_21(const double omega, const double 
vertices_omegas[4]); static double _J_22(const double omega, const double vertices_omegas[4]); static double _J_23(const double omega, const double vertices_omegas[4]); static double _J_30(const double omega, const double vertices_omegas[4]); static double _J_31(const double omega, const double vertices_omegas[4]); static double _J_32(const double omega, const double vertices_omegas[4]); static double _J_33(const double omega, const double vertices_omegas[4]); static double _J_4(void); static double _I_0(void); static double _I_10(const double omega, const double vertices_omegas[4]); static double _I_11(const double omega, const double vertices_omegas[4]); static double _I_12(const double omega, const double vertices_omegas[4]); static double _I_13(const double omega, const double vertices_omegas[4]); static double _I_20(const double omega, const double vertices_omegas[4]); static double _I_21(const double omega, const double vertices_omegas[4]); static double _I_22(const double omega, const double vertices_omegas[4]); static double _I_23(const double omega, const double vertices_omegas[4]); static double _I_30(const double omega, const double vertices_omegas[4]); static double _I_31(const double omega, const double vertices_omegas[4]); static double _I_32(const double omega, const double vertices_omegas[4]); static double _I_33(const double omega, const double vertices_omegas[4]); static double _I_4(void); void thm_get_relative_grid_address(int relative_grid_address[24][4][3], THMCONST double rec_lattice[3][3]) { int i, j, k, main_diag_index; main_diag_index = get_main_diagonal(rec_lattice); for (i = 0; i < 24; i++) { for (j = 0; j < 4; j++) { for (k = 0; k < 3; k++) { relative_grid_address[i][j][k] = db_relative_grid_address[main_diag_index][i][j][k]; } } } } void thm_get_all_relative_grid_address(int relative_grid_address[4][24][4][3]) { int i, j, k, main_diag_index; for (main_diag_index = 0; main_diag_index < 4; main_diag_index++) { for (i = 0; i < 24; i++) { 
for (j = 0; j < 4; j++) { for (k = 0; k < 3; k++) { relative_grid_address[main_diag_index][i][j][k] = db_relative_grid_address[main_diag_index][i][j][k]; } } } } } double thm_get_integration_weight(const double omega, THMCONST double tetrahedra_omegas[24][4], const char function) { if (function == 'I') { return get_integration_weight(omega, tetrahedra_omegas, _g, _I); } else { return get_integration_weight(omega, tetrahedra_omegas, _n, _J); } } void thm_get_integration_weight_at_omegas(double *integration_weights, const int num_omegas, const double *omegas, THMCONST double tetrahedra_omegas[24][4], const char function) { if (function == 'I') { get_integration_weight_at_omegas(integration_weights, num_omegas, omegas, tetrahedra_omegas, _g, _I); } else { get_integration_weight_at_omegas(integration_weights, num_omegas, omegas, tetrahedra_omegas, _n, _J); } } void thm_get_neighboring_grid_points(int neighboring_grid_points[], const int grid_point, THMCONST int relative_grid_address[][3], const int num_relative_grid_address, const int mesh[3], THMCONST int bz_grid_address[][3], const int bz_map[]) { int bzmesh[3], address_double[3], bz_address_double[3]; int i, j, bz_gp; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } for (i = 0; i < num_relative_grid_address; i++) { for (j = 0; j < 3; j++) { address_double[j] = (bz_grid_address[grid_point][j] + relative_grid_address[i][j]) * 2; bz_address_double[j] = address_double[j]; } bz_gp = bz_map[kgd_get_grid_point_double_mesh(bz_address_double, bzmesh)]; if (bz_gp == -1) { neighboring_grid_points[i] = kgd_get_grid_point_double_mesh(address_double, mesh); } else { neighboring_grid_points[i] = bz_gp; } } } void thm_get_dense_neighboring_grid_points(size_t neighboring_grid_points[], const size_t grid_point, THMCONST int relative_grid_address[][3], const int num_relative_grid_address, const int mesh[3], THMCONST int bz_grid_address[][3], const size_t bz_map[]) { int bzmesh[3], address_double[3], bz_address_double[3]; size_t i, 
j, bz_gp, prod_bz_mesh;

  /* prod_bz_mesh = total number of points of the doubled (BZ) mesh.  The
     dense variant's bz_map uses this value as its "no BZ grid point"
     sentinel (the non-dense variant above uses -1 instead). */
  prod_bz_mesh = 1;
  for (i = 0; i < 3; i++) {
    bzmesh[i] = mesh[i] * 2;
    prod_bz_mesh *= bzmesh[i];
  }
  for (i = 0; i < num_relative_grid_address; i++) {
    /* neighbour address in doubled-mesh coordinates */
    for (j = 0; j < 3; j++) {
      address_double[j] =
          (bz_grid_address[grid_point][j] + relative_grid_address[i][j]) * 2;
      bz_address_double[j] = address_double[j];
    }
    bz_gp = bz_map[kgd_get_dense_grid_point_double_mesh(bz_address_double,
                                                        bzmesh)];
    if (bz_gp == prod_bz_mesh) {
      /* no BZ image stored: fall back to the periodic grid-point index */
      neighboring_grid_points[i] =
          kgd_get_dense_grid_point_double_mesh(address_double, mesh);
    } else {
      neighboring_grid_points[i] = bz_gp;
    }
  }
}

/* Evaluate get_integration_weight() at num_omegas frequency points; the
   results are written to integration_weights[0..num_omegas-1].  The loop
   over frequency points is OpenMP-parallel; each iteration is independent. */
static void get_integration_weight_at_omegas(
    double *integration_weights, const int num_omegas, const double *omegas,
    THMCONST double tetrahedra_omegas[24][4],
    double (*gn)(const int, const double, const double[4]),
    double (*IJ)(const int, const int, const double, const double[4])) {
  int i;
#pragma omp parallel for
  for (i = 0; i < num_omegas; i++) {
    integration_weights[i] =
        get_integration_weight(omegas[i], tetrahedra_omegas, gn, IJ);
  }
}

/* Tetrahedron-method integration weight at frequency omega.
   tetrahedra_omegas holds the four vertex frequencies of each of the 24
   tetrahedra around the grid point.  gn selects the density-like function
   (_n or _g) and IJ the matching vertex-weight function (_J or _I); both are
   piecewise in omega, with the branch chosen by where omega falls relative
   to the sorted vertex frequencies. */
static double get_integration_weight(
    const double omega, THMCONST double tetrahedra_omegas[24][4],
    double (*gn)(const int, const double, const double[4]),
    double (*IJ)(const int, const int, const double, const double[4])) {
  int i, j, ci;
  double sum;
  double v[4];

  sum = 0;
  for (i = 0; i < 24; i++) {
    for (j = 0; j < 4; j++) {
      v[j] = tetrahedra_omegas[i][j];
    }
    /* sort vertex frequencies ascending; ci = sorted position of the
       original v[0], i.e. of the vertex the weight is attributed to */
    ci = sort_omegas(v);

    /* NOTE(review): all comparisons are strict, so an omega exactly equal
       to a vertex frequency contributes nothing from that tetrahedron. */
    if (omega < v[0]) {
      sum += IJ(0, ci, omega, v) * gn(0, omega, v);
    } else {
      if (v[0] < omega && omega < v[1]) {
        sum += IJ(1, ci, omega, v) * gn(1, omega, v);
      } else {
        if (v[1] < omega && omega < v[2]) {
          sum += IJ(2, ci, omega, v) * gn(2, omega, v);
        } else {
          if (v[2] < omega && omega < v[3]) {
            sum += IJ(3, ci, omega, v) * gn(3, omega, v);
          } else {
            if (v[3] < omega) {
              sum += IJ(4, ci, omega, v) * gn(4, omega, v);
            }
          }
        }
      }
    }
  }
  /* 1/6 normalization of the 24-tetrahedra sum */
  return sum / 6;
}

/* Sort the four vertex frequencies in v ascending (in place) and return the
   position (0-3) that the original v[0] occupies after sorting.  The values
   4 and 5 of i are internal intermediate states of the merge below. */
static int sort_omegas(double v[4]) {
  int i;
  double w[4];

  i = 0;
  /* sort the pairs (v[0],v[1]) and (v[2],v[3]) into w */
  if (v[0] > v[1]) {
    w[0] = v[1];
    w[1] = v[0];
    i = 1;
  } else {
    w[0] = v[0];
    w[1] = v[1];
}
  if (v[2] > v[3]) {
    w[2] = v[3];
    w[3] = v[2];
  } else {
    w[2] = v[2];
    w[3] = v[3];
  }
  /* merge the two sorted pairs back into v, updating i so that it keeps
     tracking the original v[0]; the temporary states i == 4 and i == 5 are
     resolved by the final middle-element comparison below */
  if (w[0] > w[2]) {
    v[0] = w[2];
    v[1] = w[0];
    if (i == 0) {
      i = 4;
    }
  } else {
    v[0] = w[0];
    v[1] = w[2];
  }
  if (w[1] > w[3]) {
    v[3] = w[1];
    v[2] = w[3];
    if (i == 1) {
      i = 3;
    }
  } else {
    v[3] = w[3];
    v[2] = w[1];
    if (i == 1) {
      i = 5;
    }
  }
  if (v[1] > v[2]) {
    w[1] = v[1];
    v[1] = v[2];
    v[2] = w[1];
    if (i == 4) {
      i = 2;
    }
    if (i == 5) {
      i = 1;
    }
  } else {
    if (i == 4) {
      i = 1;
    }
    if (i == 5) {
      i = 2;
    }
  }
  return i;
}

/* Return the index (0-3) of the microcell main diagonal with the smallest
   squared length in Cartesian coordinates; this selects which of the four
   precomputed tetrahedra decompositions (db_relative_grid_address) is used. */
static int get_main_diagonal(THMCONST double rec_lattice[3][3]) {
  int i, shortest;
  double length, min_length;
  double main_diag[3];

  shortest = 0;
  multiply_matrix_vector_di3(main_diag, rec_lattice, main_diagonals[0]);
  min_length = norm_squared_d3(main_diag);
  for (i = 1; i < 4; i++) {
    multiply_matrix_vector_di3(main_diag, rec_lattice, main_diagonals[i]);
    length = norm_squared_d3(main_diag);
    if (min_length > length) {
      min_length = length;
      shortest = i;
    }
  }
  return shortest;
}

/* Squared Euclidean norm of a 3-vector. */
static double norm_squared_d3(const double a[3]) {
  return a[0] * a[0] + a[1] * a[1] + a[2] * a[2];
}

/* v = a * b for a 3x3 double matrix a and an integer 3-vector b; the result
   is computed into a temporary first, so v may safely alias memory read by
   the computation. */
static void multiply_matrix_vector_di3(double v[3], THMCONST double a[3][3],
                                       const int b[3]) {
  int i;
  double c[3];
  for (i = 0; i < 3; i++) {
    c[i] = a[i][0] * b[0] + a[i][1] * b[1] + a[i][2] * b[2];
  }
  for (i = 0; i < 3; i++) {
    v[i] = c[i];
  }
}

/* Linear interpolation ratio f_{nm}(omega) =
   (omega - omega_m) / (omega_n - omega_m) between vertex frequencies. */
static double _f(const int n, const int m, const double omega,
                 const double vertices_omegas[4]) {
  return ((omega - vertices_omegas[m]) /
          (vertices_omegas[n] - vertices_omegas[m]));
}

/* Dispatch to the piecewise vertex-weight expressions _J_*:
   i  = frequency region (0: below all vertices ... 4: above all vertices),
   ci = sorted position of the vertex the weight is attributed to.
   NOTE(review): the inner switches return for ci in 0-3 and have no break,
   so an out-of-range ci would fall through to the next outer case. */
static double _J(const int i, const int ci, const double omega,
                 const double vertices_omegas[4]) {
  switch (i) {
    case 0:
      return _J_0();
    case 1:
      switch (ci) {
        case 0:
          return _J_10(omega, vertices_omegas);
        case 1:
          return _J_11(omega, vertices_omegas);
        case 2:
          return _J_12(omega, vertices_omegas);
        case 3:
          return _J_13(omega, vertices_omegas);
      }
    case 2:
      switch (ci) {
        case 0:
          return _J_20(omega, vertices_omegas);
        case 1:
          return _J_21(omega, vertices_omegas);
        case 2:
          return _J_22(omega, vertices_omegas);
case 3:
          return _J_23(omega, vertices_omegas);
      }
    case 3:
      switch (ci) {
        case 0:
          return _J_30(omega, vertices_omegas);
        case 1:
          return _J_31(omega, vertices_omegas);
        case 2:
          return _J_32(omega, vertices_omegas);
        case 3:
          return _J_33(omega, vertices_omegas);
      }
    case 4:
      return _J_4();
  }

  /* unreachable for valid (i, ci) */
  warning_print("******* Warning *******\n");
  warning_print(" J is something wrong. \n");
  warning_print("******* Warning *******\n");
  warning_print("(line %d, %s).\n", __LINE__, __FILE__);

  return 0;
}

/* Dispatch to the piecewise vertex-weight expressions _I_* (same region /
   vertex-index convention as _J above). */
static double _I(const int i, const int ci, const double omega,
                 const double vertices_omegas[4]) {
  switch (i) {
    case 0:
      return _I_0();
    case 1:
      switch (ci) {
        case 0:
          return _I_10(omega, vertices_omegas);
        case 1:
          return _I_11(omega, vertices_omegas);
        case 2:
          return _I_12(omega, vertices_omegas);
        case 3:
          return _I_13(omega, vertices_omegas);
      }
    case 2:
      switch (ci) {
        case 0:
          return _I_20(omega, vertices_omegas);
        case 1:
          return _I_21(omega, vertices_omegas);
        case 2:
          return _I_22(omega, vertices_omegas);
        case 3:
          return _I_23(omega, vertices_omegas);
      }
    case 3:
      switch (ci) {
        case 0:
          return _I_30(omega, vertices_omegas);
        case 1:
          return _I_31(omega, vertices_omegas);
        case 2:
          return _I_32(omega, vertices_omegas);
        case 3:
          return _I_33(omega, vertices_omegas);
      }
    case 4:
      return _I_4();
  }

  /* unreachable for valid (i, ci) */
  warning_print("******* Warning *******\n");
  warning_print(" I is something wrong. \n");
  warning_print("******* Warning *******\n");
  warning_print("(line %d, %s).\n", __LINE__, __FILE__);

  return 0;
}

/* Piecewise function _n_i selected by frequency region i (0-4). */
static double _n(const int i, const double omega,
                 const double vertices_omegas[4]) {
  switch (i) {
    case 0:
      return _n_0();
    case 1:
      return _n_1(omega, vertices_omegas);
    case 2:
      return _n_2(omega, vertices_omegas);
    case 3:
      return _n_3(omega, vertices_omegas);
    case 4:
      return _n_4();
  }

  /* unreachable for valid i */
  warning_print("******* Warning *******\n");
  warning_print(" n is something wrong. \n");
  warning_print("******* Warning *******\n");
  warning_print("(line %d, %s).\n", __LINE__, __FILE__);

  return 0;
}

/* Piecewise function _g_i selected by frequency region i (0-4). */
static double _g(const int i, const double omega,
                 const double vertices_omegas[4]) {
  switch (i) {
    case 0:
      return _g_0();
    case 1:
      return _g_1(omega, vertices_omegas);
    case 2:
      return _g_2(omega, vertices_omegas);
    case 3:
      return _g_3(omega, vertices_omegas);
    case 4:
      return _g_4();
  }

  /* unreachable for valid i */
  warning_print("******* Warning *******\n");
  warning_print(" g is something wrong. \n");
  warning_print("******* Warning *******\n");
  warning_print("(line %d, %s).\n", __LINE__, __FILE__);

  return 0;
}

/* omega < omega1 */
static double _n_0(void) { return 0.0; }

/* omega1 < omega < omega2 */
static double _n_1(const double omega, const double vertices_omegas[4]) {
  return (_f(1, 0, omega, vertices_omegas) *
          _f(2, 0, omega, vertices_omegas) *
          _f(3, 0, omega, vertices_omegas));
}

/* omega2 < omega < omega3 */
static double _n_2(const double omega, const double vertices_omegas[4]) {
  return (_f(3, 1, omega, vertices_omegas) *
              _f(2, 1, omega, vertices_omegas) +
          _f(3, 0, omega, vertices_omegas) *
              _f(1, 3, omega, vertices_omegas) *
              _f(2, 1, omega, vertices_omegas) +
          _f(3, 0, omega, vertices_omegas) *
              _f(2, 0, omega, vertices_omegas) *
              _f(1, 2, omega, vertices_omegas));
}

/* omega3 < omega < omega4
   (original comment said "omega2 < omega < omega3", but _n_3 is the
   region-3 branch -- compare the matching label on _g_3) */
static double _n_3(const double omega, const double vertices_omegas[4]) {
  return (1.0 - _f(0, 3, omega, vertices_omegas) *
                    _f(1, 3, omega, vertices_omegas) *
                    _f(2, 3, omega, vertices_omegas));
}

/* omega4 < omega */
static double _n_4(void) { return 1.0; }

/* omega < omega1 */
static double _g_0(void) { return 0.0; }

/* omega1 < omega < omega2 */
static double _g_1(const double omega, const double vertices_omegas[4]) {
  return (3 * _f(1, 0, omega, vertices_omegas) *
          _f(2, 0, omega, vertices_omegas) /
          (vertices_omegas[3] - vertices_omegas[0]));
}

/* omega2 < omega < omega3 */
static double _g_2(const double omega, const double vertices_omegas[4]) {
  return (3 / (vertices_omegas[3] -
vertices_omegas[0]) * (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))); } /* omega3 < omega < omega4 */ static double _g_3(const double omega, const double vertices_omegas[4]) { return (3 * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) / (vertices_omegas[3] - vertices_omegas[0])); } /* omega4 < omega */ static double _g_4(void) { return 0.0; } static double _J_0(void) { return 0.0; } static double _J_10(const double omega, const double vertices_omegas[4]) { return (1.0 + _f(0, 1, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas) + _f(0, 3, omega, vertices_omegas)) / 4; } static double _J_11(const double omega, const double vertices_omegas[4]) { return _f(1, 0, omega, vertices_omegas) / 4; } static double _J_12(const double omega, const double vertices_omegas[4]) { return _f(2, 0, omega, vertices_omegas) / 4; } static double _J_13(const double omega, const double vertices_omegas[4]) { return _f(3, 0, omega, vertices_omegas) / 4; } static double _J_20(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (1.0 + _f(0, 3, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * (1.0 + _f(0, 3, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas))) / 4 / _n_2(omega, vertices_omegas); } static double _J_21(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (1.0 + _f(1, 3, omega, vertices_omegas) + _f(1, 2, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (_f(1, 3, omega, vertices_omegas) + _f(1, 2, omega, vertices_omegas)) + 
_f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas)) / 4 / _n_2(omega, vertices_omegas); } static double _J_22(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * (_f(2, 1, omega, vertices_omegas) + _f(2, 0, omega, vertices_omegas))) / 4 / _n_2(omega, vertices_omegas); } static double _J_23(const double omega, const double vertices_omegas[4]) { return (_f(3, 1, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * _f(3, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) * (_f(3, 1, omega, vertices_omegas) + _f(3, 0, omega, vertices_omegas)) + _f(3, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) * _f(3, 0, omega, vertices_omegas)) / 4 / _n_2(omega, vertices_omegas); } static double _J_30(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_31(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_32(const double omega, const double vertices_omegas[4]) { return (1.0 + _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) * _f(2, 3, 
omega, vertices_omegas)) / 4 / _n_3(omega, vertices_omegas); } static double _J_33(const double omega, const double vertices_omegas[4]) { return (1.0 - _f(0, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 3, omega, vertices_omegas) * (1.0 + _f(3, 0, omega, vertices_omegas) + _f(3, 1, omega, vertices_omegas) + _f(3, 2, omega, vertices_omegas))) / 4 / _n_3(omega, vertices_omegas); } static double _J_4(void) { return 0.25; } static double _I_0(void) { return 0.0; } static double _I_10(const double omega, const double vertices_omegas[4]) { return (_f(0, 1, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas) + _f(0, 3, omega, vertices_omegas)) / 3; } static double _I_11(const double omega, const double vertices_omegas[4]) { return _f(1, 0, omega, vertices_omegas) / 3; } static double _I_12(const double omega, const double vertices_omegas[4]) { return _f(2, 0, omega, vertices_omegas) / 3; } static double _I_13(const double omega, const double vertices_omegas[4]) { return _f(3, 0, omega, vertices_omegas) / 3; } static double _I_20(const double omega, const double vertices_omegas[4]) { return (_f(0, 3, omega, vertices_omegas) + _f(0, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, vertices_omegas) / (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))) / 3; } static double _I_21(const double omega, const double vertices_omegas[4]) { return (_f(1, 2, omega, vertices_omegas) + _f(1, 3, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas) * _f(2, 1, omega, vertices_omegas) / (_f(1, 2, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) + _f(2, 1, omega, vertices_omegas) * _f(1, 3, omega, vertices_omegas))) / 3; } static double _I_22(const double omega, const double vertices_omegas[4]) { return (_f(2, 1, omega, vertices_omegas) + _f(2, 0, omega, vertices_omegas) * _f(2, 0, omega, vertices_omegas) * _f(1, 2, omega, 
vertices_omegas) /
              (_f(1, 2, omega, vertices_omegas) *
                   _f(2, 0, omega, vertices_omegas) +
               _f(2, 1, omega, vertices_omegas) *
                   _f(1, 3, omega, vertices_omegas))) /
         3;
}

/* region 2 (omega2 < omega < omega3) weight, vertex at sorted position 3 */
static double _I_23(const double omega, const double vertices_omegas[4]) {
  return (_f(3, 0, omega, vertices_omegas) +
          _f(3, 1, omega, vertices_omegas) *
              _f(1, 3, omega, vertices_omegas) *
              _f(2, 1, omega, vertices_omegas) /
              (_f(1, 2, omega, vertices_omegas) *
                   _f(2, 0, omega, vertices_omegas) +
               _f(2, 1, omega, vertices_omegas) *
                   _f(1, 3, omega, vertices_omegas))) /
         3;
}

/* region 3 (omega3 < omega < omega4) weights, vertices 0-3 */
static double _I_30(const double omega, const double vertices_omegas[4]) {
  return _f(0, 3, omega, vertices_omegas) / 3;
}

static double _I_31(const double omega, const double vertices_omegas[4]) {
  return _f(1, 3, omega, vertices_omegas) / 3;
}

static double _I_32(const double omega, const double vertices_omegas[4]) {
  return _f(2, 3, omega, vertices_omegas) / 3;
}

static double _I_33(const double omega, const double vertices_omegas[4]) {
  return (_f(3, 0, omega, vertices_omegas) +
          _f(3, 1, omega, vertices_omegas) +
          _f(3, 2, omega, vertices_omegas)) /
         3;
}

/* omega4 < omega */
static double _I_4(void) { return 0.0; }
/* par_mod_lr_interp.c */
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "aux_interp.h" /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildModExtInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildModExtInterpHost(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle = NULL; HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt 
total_global_cpts; /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* Intermediate matrices */ hypre_ParCSRMatrix *As_FF, *As_FC, *W; HYPRE_Real *D_q, *D_w; hypre_CSRMatrix *As_FF_diag; hypre_CSRMatrix *As_FF_offd; hypre_CSRMatrix *As_FC_diag; hypre_CSRMatrix *As_FC_offd; hypre_CSRMatrix *W_diag; hypre_CSRMatrix *W_offd; HYPRE_Int *As_FF_diag_i; HYPRE_Int *As_FF_offd_i; HYPRE_Int *As_FC_diag_i; HYPRE_Int *As_FC_offd_i; HYPRE_Int *W_diag_i; HYPRE_Int *W_offd_i; HYPRE_Int *W_diag_j; HYPRE_Int *W_offd_j; HYPRE_Real *As_FF_diag_data; HYPRE_Real *As_FF_offd_data; HYPRE_Real *As_FC_diag_data; HYPRE_Real *As_FC_offd_data; HYPRE_Real *W_diag_data; HYPRE_Real *W_offd_data; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int new_ncols_P_offd; HYPRE_Int num_cols_P_offd; HYPRE_Int *P_marker = NULL; HYPRE_Int *dof_func_offd = NULL; /* Loop variables */ HYPRE_Int index; HYPRE_Int i, j; HYPRE_Int *cpt_array; HYPRE_Int *start_array; HYPRE_Int *startf_array; HYPRE_Int start, stop, startf, stopf; HYPRE_Int cnt_diag, cnt_offd, row, c_pt; /* Definitions */ //HYPRE_Real wall_time; HYPRE_Int n_Cpts, n_Fpts; HYPRE_Int num_threads = hypre_NumThreads(); //if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); n_Cpts = num_cpts_global[1]-num_cpts_global[0]; hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF); As_FC_diag = hypre_ParCSRMatrixDiag(As_FC); As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag); As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag); As_FC_offd = 
hypre_ParCSRMatrixOffd(As_FC); As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd); As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd); As_FF_diag = hypre_ParCSRMatrixDiag(As_FF); As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag); As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag); As_FF_offd = hypre_ParCSRMatrixOffd(As_FF); As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd); As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd); n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag); D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,row) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Real beta, gamma; start = (n_fine/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = (n_fine/num_threads)*(my_thread_num+1); } start_array[my_thread_num+1] = stop; for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i=1; i < num_threads; i++) { cpt_array[i] += cpt_array[i-1]; } if (num_functions > 1) { HYPRE_Int *int_buf_data = NULL; HYPRE_Int num_sends, startc; HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = 
dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) startf = start - cpt_array[my_thread_num-1]; else startf = 0; if (my_thread_num < num_threads-1) stopf = stop - cpt_array[my_thread_num]; else stopf = n_Fpts; startf_array[my_thread_num+1] = stopf; /* Create D_q = D_beta */ for (i=startf; i < stopf; i++) { for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++) { D_q[i] += As_FC_diag_data[j]; } for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++) { D_q[i] += As_FC_offd_data[j]; } } /* Create D_w = D_alpha + D_gamma */ row = startf; for (i=start; i < stop; i++) { if (CF_marker[i] < 0) { if (num_functions > 1) { HYPRE_Int jA, jS, jC; jC = A_diag_i[i]; for (j=S_diag_i[i]; j < S_diag_i[i+1]; j++) { jS = S_diag_j[j]; jA = A_diag_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func[jA]) { D_w[row] += A_diag_data[jC++]; } else jC++; jA = A_diag_j[jC]; } jC++; } for (j=jC; j < A_diag_i[i+1]; j++) { if (dof_func[i] == dof_func[A_diag_j[j]]) D_w[row] += A_diag_data[j]; } jC = A_offd_i[i]; for (j=S_offd_i[i]; j < S_offd_i[i+1]; j++) { jS = S_offd_j[j]; jA = A_offd_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func_offd[jA]) { D_w[row] += A_offd_data[jC++]; } else jC++; jA = A_offd_j[jC]; } jC++; } for (j=jC; j < A_offd_i[i+1]; j++) { if (dof_func[i] == dof_func_offd[A_offd_j[j]]) D_w[row] += A_offd_data[j]; } row++; } else { for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++) { D_w[row] += A_diag_data[j]; } for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++) { D_w[row] += A_offd_data[j]; } for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++) { D_w[row] -= As_FF_diag_data[j]; } for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++) { D_w[row] -= As_FF_offd_data[j]; } D_w[row] -= D_q[row]; row++; } } } for (i=startf; i<stopf; 
i++) { j = As_FF_diag_i[i]; if (D_w[i]) beta = 1.0/D_w[i]; else beta = 1.0; As_FF_diag_data[j] = beta*D_q[i]; if (D_q[i]) gamma = -1.0/D_q[i]; else gamma = 1.0; for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++) As_FF_diag_data[j] *= beta; for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++) As_FF_offd_data[j] *= beta; for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++) As_FC_diag_data[j] *= gamma; for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++) As_FC_offd_data[j] *= gamma; } } /* end parallel region */ W = hypre_ParMatmul(As_FF, As_FC); W_diag = hypre_ParCSRMatrixDiag(W); W_offd = hypre_ParCSRMatrixOffd(W); W_diag_i = hypre_CSRMatrixI(W_diag); W_diag_j = hypre_CSRMatrixJ(W_diag); W_diag_data = hypre_CSRMatrixData(W_diag); W_offd_i = hypre_CSRMatrixI(W_offd); W_offd_j = hypre_CSRMatrixJ(W_offd); W_offd_data = hypre_CSRMatrixData(W_offd); num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd); /*----------------------------------------------------------------------- * Intialize data for P *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts]; P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); startf = startf_array[my_thread_num]; stopf = startf_array[my_thread_num+1]; start = start_array[my_thread_num]; stop = start_array[my_thread_num+1]; if (my_thread_num > 0) c_pt = 
cpt_array[my_thread_num-1]; else c_pt = 0; cnt_diag = W_diag_i[startf]+c_pt; cnt_offd = W_offd_i[startf]; row = startf; for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { P_diag_j[cnt_diag] = c_pt++; P_diag_data[cnt_diag++] = 1.0; } else { for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++) { P_diag_j[cnt_diag] = W_diag_j[j]; P_diag_data[cnt_diag++] = W_diag_data[j]; } for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++) { P_offd_j[cnt_offd] = W_offd_j[j]; P_offd_data[cnt_offd++] = W_offd_data[j]; } row++; } P_diag_i[i+1] = cnt_diag; P_offd_i[i+1] = cnt_offd; } } /* end parallel region */ /*----------------------------------------------------------------------- * Create matrix *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, num_cols_P_offd, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W); hypre_ParCSRMatrixColMapOffd(W) = NULL; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { HYPRE_Int *map; hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = 
P_offd_i[n_fine];
      col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
      if (num_cols_P_offd)
      {
         /* Truncation may have emptied some off-diagonal columns of P:
          * compress col_map_offd_P to the columns still referenced and
          * renumber P_offd_j accordingly. */
         P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
         /* mark every off-diag column that still occurs in P */
         for (i=0; i < P_offd_size; i++)
         {
            P_marker[P_offd_j[i]] = 1;
         }
         new_ncols_P_offd = 0;
         for (i=0; i < num_cols_P_offd; i++)
         {
            if (P_marker[i]) new_ncols_P_offd++;
         }
         new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST);
         /* map[k] = old local column index of the k-th surviving column */
         index = 0;
         for (i=0; i < num_cols_P_offd; i++)
            if (P_marker[i])
            {
               new_col_map_offd[index] = col_map_offd_P[i];
               map[index++] = i;
            }
         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         /* renumber P_offd column indices into the compressed map */
         for (i=0; i < P_offd_size; i++)
         {
            P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd);
         }
         hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
         hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
         hypre_TFree(map, HYPRE_MEMORY_HOST);
      }
   }

   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(D_q, HYPRE_MEMORY_HOST);
   hypre_TFree(D_w, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);

   return hypre_error_flag;
}

/*-----------------------------------------------------------------------*
 * Modularized Extended Interpolation
 *
 * Dispatch wrapper: runs the host implementation when A lives in host
 * memory; in CUDA/HIP builds, dispatches to the device implementation
 * otherwise.  Returns the error flag of the chosen implementation.
 *-----------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtInterp(hypre_ParCSRMatrix  *A,
                                 HYPRE_Int           *CF_marker,
                                 hypre_ParCSRMatrix  *S,
                                 HYPRE_BigInt        *num_cpts_global,
                                 HYPRE_Int            num_functions,
                                 HYPRE_Int           *dof_func,
                                 HYPRE_Int            debug_flag,
                                 HYPRE_Real           trunc_factor,
                                 HYPRE_Int            max_elmts,
                                 hypre_ParCSRMatrix **P_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ModExtInterp");
#endif

   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_BoomerAMGBuildModExtInterpHost(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
                                                  debug_flag,trunc_factor,max_elmts,P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      /* NOTE(review): the device path passes (1, NULL) in place of
       * (num_functions, dof_func) -- presumably the device kernel only
       * handles the scalar case; confirm against the device routine. */
      ierr = hypre_BoomerAMGBuildExtInterpDevice(A,CF_marker,S,num_cpts_global,1,NULL,
                                                 debug_flag,trunc_factor,max_elmts,P_ptr);
   }
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildModExtPIInterp
 * Comment: host implementation of modularized extended+i interpolation.
 * Note the argument order: debug_flag precedes num_functions here, unlike
 * the other Mod*InterpHost routines in this file.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterpHost(hypre_ParCSRMatrix  *A,
                                       HYPRE_Int           *CF_marker,
                                       hypre_ParCSRMatrix  *S,
                                       HYPRE_BigInt        *num_cpts_global,
                                       HYPRE_Int            debug_flag,
                                       HYPRE_Int            num_functions,
                                       HYPRE_Int           *dof_func,
                                       HYPRE_Real           trunc_factor,
                                       HYPRE_Int            max_elmts,
                                       hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle = NULL;
   HYPRE_MemoryLocation    memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);

   HYPRE_Int my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd      = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i    = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j    = hypre_CSRMatrixJ(A_offd);

   hypre_CSRMatrix *S_diag      = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_j    = hypre_CSRMatrixJ(S_diag);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt total_global_cpts; hypre_CSRMatrix *As_FF_ext = NULL; HYPRE_Real *As_FF_ext_data = NULL; HYPRE_Int *As_FF_ext_i = NULL; HYPRE_BigInt *As_FF_ext_j = NULL; /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* Intermediate matrices */ hypre_ParCSRMatrix *As_FF, *As_FC, *W; HYPRE_Real *D_q, *D_w, *D_theta, *D_q_offd = NULL; hypre_CSRMatrix *As_FF_diag; hypre_CSRMatrix *As_FF_offd; hypre_CSRMatrix *As_FC_diag; hypre_CSRMatrix *As_FC_offd; hypre_CSRMatrix *W_diag; hypre_CSRMatrix *W_offd; HYPRE_Int *As_FF_diag_i; HYPRE_Int *As_FF_diag_j; HYPRE_Int *As_FF_offd_i; HYPRE_Int *As_FF_offd_j = NULL; HYPRE_Int *As_FC_diag_i; HYPRE_Int *As_FC_offd_i; HYPRE_Int *W_diag_i; HYPRE_Int *W_offd_i; HYPRE_Int *W_diag_j; HYPRE_Int *W_offd_j = NULL; HYPRE_Real *As_FF_diag_data; HYPRE_Real *As_FF_offd_data = NULL; HYPRE_Real *As_FC_diag_data; HYPRE_Real *As_FC_offd_data = NULL; HYPRE_Real *W_diag_data; HYPRE_Real *W_offd_data = NULL; HYPRE_Real *buf_data = NULL; HYPRE_Real *tmp_FF_diag_data = NULL; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_BigInt first_index; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int new_ncols_P_offd; HYPRE_Int num_cols_P_offd; HYPRE_Int *P_marker = NULL; HYPRE_Int *dof_func_offd = NULL; /* Loop variables */ HYPRE_Int index, startc, num_sends; HYPRE_Int i, j, jj, k, kk; HYPRE_Int *cpt_array; HYPRE_Int *start_array; HYPRE_Int *startf_array; HYPRE_Int start, stop, startf, stopf; HYPRE_Int cnt_diag, cnt_offd, row, c_pt; HYPRE_Int num_cols_A_FF_offd; HYPRE_Real value, value1, 
theta; /* Definitions */ //HYPRE_Real wall_time; HYPRE_Int n_Cpts, n_Fpts; HYPRE_Int num_threads = hypre_NumThreads(); //if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); n_Cpts = num_cpts_global[1]-num_cpts_global[0]; hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF); if (num_procs > 1) { As_FF_ext = hypre_ParCSRMatrixExtractBExt(As_FF,As_FF,1); As_FF_ext_i = hypre_CSRMatrixI(As_FF_ext); As_FF_ext_j = hypre_CSRMatrixBigJ(As_FF_ext); As_FF_ext_data = hypre_CSRMatrixData(As_FF_ext); } As_FC_diag = hypre_ParCSRMatrixDiag(As_FC); As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag); As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag); As_FC_offd = hypre_ParCSRMatrixOffd(As_FC); As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd); As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd); As_FF_diag = hypre_ParCSRMatrixDiag(As_FF); As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag); As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag); As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag); As_FF_offd = hypre_ParCSRMatrixOffd(As_FF); As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd); As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd); As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd); n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag); num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd); first_index = hypre_ParCSRMatrixRowStarts(As_FF)[0]; tmp_FF_diag_data = hypre_CTAlloc(HYPRE_Real, As_FF_diag_i[n_Fpts], HYPRE_MEMORY_HOST); D_q = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_theta = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); startf_array = 
hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,jj,k,kk,start,stop,startf,stopf,row,theta,value,value1) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); start = (n_fine/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = (n_fine/num_threads)*(my_thread_num+1); } start_array[my_thread_num+1] = stop; for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i=1; i < num_threads; i++) { cpt_array[i] += cpt_array[i-1]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) startf = start - cpt_array[my_thread_num-1]; else startf = 0; if (my_thread_num < num_threads-1) stopf = stop - cpt_array[my_thread_num]; else stopf = n_Fpts; startf_array[my_thread_num+1] = stopf; for (i=startf; i < stopf; i++) { for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++) { D_q[i] += As_FC_diag_data[j]; } for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++) { D_q[i] += As_FC_offd_data[j]; } } for (j = As_FF_diag_i[startf]; j < As_FF_diag_i[stopf]; j++) { tmp_FF_diag_data[j] = As_FF_diag_data[j]; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (num_cols_A_FF_offd) { D_q_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, HYPRE_MEMORY_HOST); } index = 0; comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF); if (!comm_pkg) { hypre_MatvecCommPkgCreate(As_FF); comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { buf_data[index++] = D_q[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = 
hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_q_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { HYPRE_Int *int_buf_data = NULL; HYPRE_Int num_sends, startc; HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif row = startf; for (i=start; i < stop; i++) { HYPRE_Int jA, jC, jS; if (CF_marker[i] < 0) { if (num_functions > 1) { jC = A_diag_i[i]; for (j=S_diag_i[i]; j < S_diag_i[i+1]; j++) { jS = S_diag_j[j]; jA = A_diag_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func[jA]) { D_w[row] += A_diag_data[jC++]; } else jC++; jA = A_diag_j[jC]; } jC++; } for (j=jC; j < A_diag_i[i+1]; j++) { if (dof_func[i] == dof_func[A_diag_j[j]]) D_w[row] += A_diag_data[j]; } jC = A_offd_i[i]; for (j=S_offd_i[i]; j < S_offd_i[i+1]; j++) { jS = S_offd_j[j]; jA = A_offd_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func_offd[jA]) { D_w[row] += A_offd_data[jC++]; } else jC++; jA = A_offd_j[jC]; } jC++; } for (j=jC; j < A_offd_i[i+1]; j++) { if (dof_func[i] == dof_func_offd[A_offd_j[j]]) D_w[row] += A_offd_data[j]; } row++; } else { for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++) { D_w[row] += A_diag_data[j]; } for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++) { D_w[row] += A_offd_data[j]; } for (j=As_FF_diag_i[row]+1; j < 
As_FF_diag_i[row+1]; j++) { D_w[row] -= As_FF_diag_data[j]; } for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++) { D_w[row] -= As_FF_offd_data[j]; } D_w[row] -= D_q[row]; row++; } } } for (i=startf; i<stopf; i++) { for (j = As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++) { jj = As_FF_diag_j[j]; value = D_q[jj]; for (k = As_FF_diag_i[jj]+1; k < As_FF_diag_i[jj+1]; k++) { kk = As_FF_diag_j[k]; if (kk == i) { value1 = tmp_FF_diag_data[k]; value += value1; D_theta[i] += As_FF_diag_data[j]*value1/value; break; } } As_FF_diag_data[j] /= value; } for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++) { jj = As_FF_offd_j[j]; value = D_q_offd[jj]; for (k = As_FF_ext_i[jj]; k < As_FF_ext_i[jj+1]; k++) { kk = (HYPRE_Int)(As_FF_ext_j[k] - first_index); if (kk == i) { value1 = As_FF_ext_data[k]; value += value1; D_theta[i] += As_FF_offd_data[j]*value1/value; break; } } As_FF_offd_data[j] /= value; } As_FF_diag_data[As_FF_diag_i[i]] = 1.0; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i=startf; i<stopf; i++) { theta = (D_theta[i]+D_w[i]); if (theta) { theta = -1.0/theta; for (j=As_FF_diag_i[i]; j < As_FF_diag_i[i+1]; j++) As_FF_diag_data[j] *= theta; for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++) As_FF_offd_data[j] *= theta; } } } /* end parallel region */ W = hypre_ParMatmul(As_FF, As_FC); W_diag = hypre_ParCSRMatrixDiag(W); W_offd = hypre_ParCSRMatrixOffd(W); W_diag_i = hypre_CSRMatrixI(W_diag); W_diag_j = hypre_CSRMatrixJ(W_diag); W_diag_data = hypre_CSRMatrixData(W_diag); W_offd_i = hypre_CSRMatrixI(W_offd); W_offd_j = hypre_CSRMatrixJ(W_offd); W_offd_data = hypre_CSRMatrixData(W_offd); num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd); /*----------------------------------------------------------------------- * Intialize data for P *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_diag_size = 
n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts]; P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); startf = startf_array[my_thread_num]; stopf = startf_array[my_thread_num+1]; start = start_array[my_thread_num]; stop = start_array[my_thread_num+1]; if (my_thread_num > 0) c_pt = cpt_array[my_thread_num-1]; else c_pt = 0; cnt_diag = W_diag_i[startf]+c_pt; cnt_offd = W_offd_i[startf]; row = startf; for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { P_diag_j[cnt_diag] = c_pt++; P_diag_data[cnt_diag++] = 1.0; } else { for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++) { P_diag_j[cnt_diag] = W_diag_j[j]; P_diag_data[cnt_diag++] = W_diag_data[j]; } for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++) { P_offd_j[cnt_offd] = W_offd_j[j]; P_offd_data[cnt_offd++] = W_offd_data[j]; } row++; } P_diag_i[i+1] = cnt_diag; P_offd_i[i+1] = cnt_offd; } } /* end parallel region */ /*----------------------------------------------------------------------- * Create matrix *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, num_cols_P_offd, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; 
hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W); hypre_ParCSRMatrixColMapOffd(W) = NULL; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { HYPRE_Int *map; hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P); if (num_cols_P_offd) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < P_offd_size; i++) P_marker[P_offd_j[i]] = 1; new_ncols_P_offd = 0; for (i=0; i < num_cols_P_offd; i++) if (P_marker[i]) new_ncols_P_offd++; new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST); map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) if (P_marker[i]) { new_col_map_offd[index] = col_map_offd_P[i]; map[index++] = i; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) { P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd); } hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd; hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd; hypre_TFree(map, HYPRE_MEMORY_HOST); } } hypre_MatvecCommPkgCreate(P); *P_ptr = P; /* Deallocate memory */ hypre_TFree(D_q, HYPRE_MEMORY_HOST); hypre_TFree(D_q_offd, HYPRE_MEMORY_HOST); hypre_TFree(D_w, HYPRE_MEMORY_HOST); 
hypre_TFree(D_theta, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_FF_diag_data, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);
   hypre_CSRMatrixDestroy(As_FF_ext);

   return hypre_error_flag;
}

/*-----------------------------------------------------------------------*
 * Modularized Extended+i Interpolation
 *
 * Dispatch wrapper: host implementation when A lives in host memory;
 * device implementation otherwise (CUDA/HIP builds only).
 *-----------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPIInterp(hypre_ParCSRMatrix  *A,
                                   HYPRE_Int           *CF_marker,
                                   hypre_ParCSRMatrix  *S,
                                   HYPRE_BigInt        *num_cpts_global,
                                   HYPRE_Int            num_functions,
                                   HYPRE_Int           *dof_func,
                                   HYPRE_Int            debug_flag,
                                   HYPRE_Real           trunc_factor,
                                   HYPRE_Int            max_elmts,
                                   hypre_ParCSRMatrix **P_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ExtPIInterp");
#endif

   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      /* the host routine takes debug_flag BEFORE num_functions,
       * unlike the other Mod*InterpHost routines in this file */
      ierr = hypre_BoomerAMGBuildModExtPIInterpHost(A, CF_marker, S, num_cpts_global,
                                                    debug_flag, num_functions, dof_func,
                                                    trunc_factor, max_elmts, P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      /* NOTE(review): device path passes (1, NULL) instead of
       * (num_functions, dof_func) -- scalar case only; confirm. */
      ierr = hypre_BoomerAMGBuildExtPIInterpDevice(A, CF_marker, S, num_cpts_global, 1, NULL,
                                                   debug_flag, trunc_factor, max_elmts, P_ptr);
   }
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildModExtPEInterp
 * Comment: host implementation of modularized extended+e interpolation.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildModExtPEInterpHost(hypre_ParCSRMatrix  *A,
                                       HYPRE_Int           *CF_marker,
                                       hypre_ParCSRMatrix  *S,
HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle = NULL; HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt total_global_cpts; /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* Intermediate matrices */ hypre_ParCSRMatrix *As_FF, *As_FC, *W; HYPRE_Real *D_beta, *D_w, *D_lambda, *D_tmp, *D_tau, *D_tmp_offd = NULL; hypre_CSRMatrix *As_FF_diag; hypre_CSRMatrix *As_FF_offd; hypre_CSRMatrix *As_FC_diag; hypre_CSRMatrix *As_FC_offd; hypre_CSRMatrix *W_diag; hypre_CSRMatrix *W_offd; HYPRE_Int *As_FF_diag_i; HYPRE_Int *As_FF_diag_j; HYPRE_Int *As_FF_offd_i; HYPRE_Int *As_FF_offd_j; HYPRE_Int *As_FC_diag_i; HYPRE_Int *As_FC_offd_i; HYPRE_Int 
*W_diag_i; HYPRE_Int *W_offd_i; HYPRE_Int *W_diag_j; HYPRE_Int *W_offd_j = NULL; HYPRE_Real *As_FF_diag_data; HYPRE_Real *As_FF_offd_data = NULL; HYPRE_Real *As_FC_diag_data; HYPRE_Real *As_FC_offd_data = NULL; HYPRE_Real *W_diag_data; HYPRE_Real *W_offd_data = NULL; HYPRE_Real *buf_data = NULL; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int new_ncols_P_offd; HYPRE_Int num_cols_P_offd; HYPRE_Int *P_marker = NULL; HYPRE_Int *dof_func_offd = NULL; /* Loop variables */ HYPRE_Int index, startc, num_sends; HYPRE_Int i, j; HYPRE_Int *cpt_array; HYPRE_Int *start_array; HYPRE_Int *startf_array; HYPRE_Int start, stop, startf, stopf; HYPRE_Int cnt_diag, cnt_offd, row, c_pt; HYPRE_Int num_cols_A_FF_offd; HYPRE_Real value, theta; /* Definitions */ //HYPRE_Real wall_time; HYPRE_Int n_Cpts, n_Fpts; HYPRE_Int num_threads = hypre_NumThreads(); //if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); n_Cpts = num_cpts_global[1]-num_cpts_global[0]; hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, S, &As_FC, &As_FF); As_FC_diag = hypre_ParCSRMatrixDiag(As_FC); As_FC_diag_i = hypre_CSRMatrixI(As_FC_diag); As_FC_diag_data = hypre_CSRMatrixData(As_FC_diag); As_FC_offd = hypre_ParCSRMatrixOffd(As_FC); As_FC_offd_i = hypre_CSRMatrixI(As_FC_offd); As_FC_offd_data = hypre_CSRMatrixData(As_FC_offd); As_FF_diag = hypre_ParCSRMatrixDiag(As_FF); As_FF_diag_i = hypre_CSRMatrixI(As_FF_diag); As_FF_diag_j = hypre_CSRMatrixJ(As_FF_diag); As_FF_diag_data = hypre_CSRMatrixData(As_FF_diag); As_FF_offd = hypre_ParCSRMatrixOffd(As_FF); As_FF_offd_i = hypre_CSRMatrixI(As_FF_offd); As_FF_offd_j = hypre_CSRMatrixJ(As_FF_offd); As_FF_offd_data = hypre_CSRMatrixData(As_FF_offd); 
n_Fpts = hypre_CSRMatrixNumRows(As_FF_diag); num_cols_A_FF_offd = hypre_CSRMatrixNumCols(As_FF_offd); D_beta = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_lambda = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_tmp = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_tau = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); D_w = hypre_CTAlloc(HYPRE_Real, n_Fpts, HYPRE_MEMORY_HOST); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); start_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); startf_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,row,theta,value) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); start = (n_fine/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = (n_fine/num_threads)*(my_thread_num+1); } start_array[my_thread_num+1] = stop; for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i=1; i < num_threads; i++) { cpt_array[i] += cpt_array[i-1]; } if (num_functions > 1) { HYPRE_Int *int_buf_data = NULL; HYPRE_Int num_sends, startc; HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); 
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) startf = start - cpt_array[my_thread_num-1]; else startf = 0; if (my_thread_num < num_threads-1) stopf = stop - cpt_array[my_thread_num]; else stopf = n_Fpts; startf_array[my_thread_num+1] = stopf; for (i=startf; i < stopf; i++) { HYPRE_Real number; for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++) { D_lambda[i] += As_FF_diag_data[j]; } for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++) { D_lambda[i] += As_FF_offd_data[j]; } number = (HYPRE_Real)(As_FF_diag_i[i+1]-As_FF_diag_i[i]-1+As_FF_offd_i[i+1]-As_FF_offd_i[i]); if (number) D_lambda[i] /= number; for (j=As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++) { D_beta[i] += As_FC_diag_data[j]; } for (j=As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++) { D_beta[i] += As_FC_offd_data[j]; } if (D_lambda[i]+D_beta[i]) D_tmp[i] = D_lambda[i]/(D_beta[i]+D_lambda[i]); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (num_cols_A_FF_offd) { D_tmp_offd = hypre_CTAlloc(HYPRE_Real, num_cols_A_FF_offd, HYPRE_MEMORY_HOST); } index = 0; comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF); if (!comm_pkg) { hypre_MatvecCommPkgCreate(As_FF); comm_pkg = hypre_ParCSRMatrixCommPkg(As_FF); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { buf_data[index++] = D_tmp[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, D_tmp_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif row = startf; for (i=start; i < stop; i++) { if (CF_marker[i] < 0) { if (num_functions > 1) { HYPRE_Int jA, jC, jS; jC = A_diag_i[i]; 
for (j=S_diag_i[i]; j < S_diag_i[i+1]; j++) { jS = S_diag_j[j]; jA = A_diag_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func[jA]) { D_w[row] += A_diag_data[jC++]; } else jC++; jA = A_diag_j[jC]; } jC++; } for (j=jC; j < A_diag_i[i+1]; j++) { if (dof_func[i] == dof_func[A_diag_j[j]]) D_w[row] += A_diag_data[j]; } jC = A_offd_i[i]; for (j=S_offd_i[i]; j < S_offd_i[i+1]; j++) { jS = S_offd_j[j]; jA = A_offd_j[jC]; while (jA != jS) { if (dof_func[i] == dof_func_offd[jA]) { D_w[row] += A_offd_data[jC++]; } else jC++; jA = A_offd_j[jC]; } jC++; } for (j=jC; j < A_offd_i[i+1]; j++) { if (dof_func[i] == dof_func_offd[A_offd_j[j]]) D_w[row] += A_offd_data[j]; } row++; } else { for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++) { D_w[row] += A_diag_data[j]; } for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++) { D_w[row] += A_offd_data[j]; } for (j=As_FF_diag_i[row]+1; j < As_FF_diag_i[row+1]; j++) { D_w[row] -= As_FF_diag_data[j]; } for (j=As_FF_offd_i[row]; j < As_FF_offd_i[row+1]; j++) { D_w[row] -= As_FF_offd_data[j]; } D_w[row] -= D_beta[row]; row++; } } } for (i=startf; i<stopf; i++) { for (j=As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++) { index = As_FF_diag_j[j]; D_tau[i] += As_FF_diag_data[j]*D_tmp[index]; } for (j=As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++) { index = As_FF_offd_j[j]; D_tau[i] += As_FF_offd_data[j]*D_tmp_offd[index]; } } for (i=startf; i<stopf; i++) { value = D_w[i]+D_tau[i]; if (value) value = -1.0/value; theta = D_beta[i]+D_lambda[i]; As_FF_diag_data[As_FF_diag_i[i]] = value*theta; if (theta) theta = 1.0/theta; for (j = As_FF_diag_i[i]+1; j < As_FF_diag_i[i+1]; j++) { As_FF_diag_data[j] *= value; } for (j = As_FF_offd_i[i]; j < As_FF_offd_i[i+1]; j++) { As_FF_offd_data[j] *= value; } for (j = As_FC_diag_i[i]; j < As_FC_diag_i[i+1]; j++) { As_FC_diag_data[j] *= theta; } for (j = As_FC_offd_i[i]; j < As_FC_offd_i[i+1]; j++) { As_FC_offd_data[j] *= theta; } } } /* end parallel region */ W = hypre_ParMatmul(As_FF, As_FC); W_diag = hypre_ParCSRMatrixDiag(W); 
W_offd = hypre_ParCSRMatrixOffd(W); W_diag_i = hypre_CSRMatrixI(W_diag); W_diag_j = hypre_CSRMatrixJ(W_diag); W_diag_data = hypre_CSRMatrixData(W_diag); W_offd_i = hypre_CSRMatrixI(W_offd); W_offd_j = hypre_CSRMatrixJ(W_offd); W_offd_data = hypre_CSRMatrixData(W_offd); num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd); /*----------------------------------------------------------------------- * Intialize data for P *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P); P_diag_size = n_Cpts + hypre_CSRMatrixI(W_diag)[n_Fpts]; P_offd_size = hypre_CSRMatrixI(W_offd)[n_Fpts]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,start,stop,startf,stopf,c_pt,row,cnt_diag,cnt_offd) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); startf = startf_array[my_thread_num]; stopf = startf_array[my_thread_num+1]; start = start_array[my_thread_num]; stop = start_array[my_thread_num+1]; if (my_thread_num > 0) c_pt = cpt_array[my_thread_num-1]; else c_pt = 0; cnt_diag = W_diag_i[startf]+c_pt; cnt_offd = W_offd_i[startf]; row = startf; for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { P_diag_j[cnt_diag] = c_pt++; P_diag_data[cnt_diag++] = 1.0; } else { for (j=W_diag_i[row]; j < W_diag_i[row+1]; j++) { P_diag_j[cnt_diag] = W_diag_j[j]; P_diag_data[cnt_diag++] = W_diag_data[j]; } for (j=W_offd_i[row]; j < W_offd_i[row+1]; j++) { P_offd_j[cnt_offd] = W_offd_j[j]; P_offd_data[cnt_offd++] = W_offd_data[j]; } row++; } P_diag_i[i+1] = cnt_diag; P_offd_i[i+1] = cnt_offd; } } /* end parallel region */ 
/*----------------------------------------------------------------------- * Create matrix *-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, num_cols_P_offd, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(W); hypre_ParCSRMatrixColMapOffd(W) = NULL; hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { HYPRE_Int *map; hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P); if (num_cols_P_offd) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < P_offd_size; i++) P_marker[P_offd_j[i]] = 1; new_ncols_P_offd = 0; for (i=0; i < num_cols_P_offd; i++) if (P_marker[i]) new_ncols_P_offd++; new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_ncols_P_offd, HYPRE_MEMORY_HOST); map = hypre_CTAlloc(HYPRE_Int, new_ncols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) if (P_marker[i]) { new_col_map_offd[index] = col_map_offd_P[i]; map[index++] = 
   i;
            }

         hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         /* remap P_offd column indices onto the compressed off-diag column map */
         for (i=0; i < P_offd_size; i++)
         {
            P_offd_j[i] = hypre_BinarySearch(map, P_offd_j[i], new_ncols_P_offd);
         }

         hypre_TFree(col_map_offd_P, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
         hypre_CSRMatrixNumCols(P_offd) = new_ncols_P_offd;
         hypre_TFree(map, HYPRE_MEMORY_HOST);
      }
   }

   /* set up the communication package for P and hand it back to the caller */
   hypre_MatvecCommPkgCreate(P);

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(D_tmp, HYPRE_MEMORY_HOST);
   hypre_TFree(D_tmp_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(D_w, HYPRE_MEMORY_HOST);
   hypre_TFree(D_tau, HYPRE_MEMORY_HOST);
   hypre_TFree(D_beta, HYPRE_MEMORY_HOST);
   hypre_TFree(D_lambda, HYPRE_MEMORY_HOST);
   hypre_TFree(cpt_array, HYPRE_MEMORY_HOST);
   hypre_TFree(start_array, HYPRE_MEMORY_HOST);
   hypre_TFree(startf_array, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(As_FF);
   hypre_ParCSRMatrixDestroy(As_FC);
   hypre_ParCSRMatrixDestroy(W);

   return hypre_error_flag;
}

/*-----------------------------------------------------------------------*
 *  Modularized Extended+e Interpolation
 *-----------------------------------------------------------------------*/

/*
 * Dispatch wrapper: builds the modularized extended+e ("ext+e")
 * interpolation operator P for BoomerAMG, selecting the host or the
 * device implementation based on where A's data lives.
 *
 * NOTE(review): the device branch passes num_functions=1 and
 * dof_func=NULL instead of forwarding the caller's values — presumably
 * the device kernel does not support multiple functions; confirm.
 */
HYPRE_Int
hypre_BoomerAMGBuildModExtPEInterp(hypre_ParCSRMatrix *A,
                                   HYPRE_Int          *CF_marker,
                                   hypre_ParCSRMatrix *S,
                                   HYPRE_BigInt       *num_cpts_global,
                                   HYPRE_Int           num_functions,
                                   HYPRE_Int          *dof_func,
                                   HYPRE_Int           debug_flag,
                                   HYPRE_Real          trunc_factor,
                                   HYPRE_Int           max_elmts,
                                   hypre_ParCSRMatrix **P_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ExtPEInterp");
#endif

   /* choose host vs. device path from A's memory location */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_BoomerAMGBuildModExtPEInterpHost(A, CF_marker, S, num_cpts_global,
                                                    num_functions, dof_func,
                                                    debug_flag, trunc_factor, max_elmts, P_ptr);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_BoomerAMGBuildExtPEInterpDevice(A,CF_marker,S,num_cpts_global,1,NULL,
                                                   debug_flag,trunc_factor,max_elmts,P_ptr);
   }
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}
linear_interpolators.h
#ifndef XFIELDS_LINEAR_INTERPOLATORS_H
#define XFIELDS_LINEAR_INTERPOLATORS_H

/*
 * Cell indices and the eight trilinear weights for one query point on a
 * regular (nx, ny, nz) grid. When the point is outside the interpolable
 * region, ix/iy/iz are set to the sentinel -999 and the weights are
 * left undefined (callers must check ix < 0 before using them).
 */
typedef struct{
    int64_t ix;
    int64_t iy;
    int64_t iz;
    int64_t nx;
    int64_t ny;
    int64_t nz;
    double w000;
    double w100;
    double w010;
    double w110;
    double w001;
    double w101;
    double w011;
    double w111;
}IndicesAndWeights;

/*
 * Locate the grid cell containing (x, y, z) and compute the eight
 * trilinear interpolation weights. Grid geometry (origin, spacing,
 * dimensions) is read from fmap via the generated getters.
 * Points on or outside the last cell in any dimension are flagged
 * with ix = iy = iz = -999.
 */
/*gpufun*/
IndicesAndWeights TriLinearInterpolatedFieldMap_compute_indeces_and_weights(
	        TriLinearInterpolatedFieldMapData fmap,
	        double x, double y, double z){

    IndicesAndWeights iw;

    const double dx = TriLinearInterpolatedFieldMapData_get_dx(fmap);
    const double dy = TriLinearInterpolatedFieldMapData_get_dy(fmap);
    const double dz = TriLinearInterpolatedFieldMapData_get_dz(fmap);
    const double x0 = TriLinearInterpolatedFieldMapData_get_x_min(fmap);
    const double y0 = TriLinearInterpolatedFieldMapData_get_y_min(fmap);
    const double z0 = TriLinearInterpolatedFieldMapData_get_z_min(fmap);
    const int64_t nx = TriLinearInterpolatedFieldMapData_get_nx(fmap);
    const int64_t ny = TriLinearInterpolatedFieldMapData_get_ny(fmap);
    const int64_t nz = TriLinearInterpolatedFieldMapData_get_nz(fmap);

    iw.nx = nx;
    iw.ny = ny;
    iw.nz = nz;

    // indices of the lower-left-front corner of the containing cell
    iw.ix = floor((x - x0) / dx);
    iw.iy = floor((y - y0) / dy);
    iw.iz = floor((z - z0) / dz);

    if (iw.ix >= 0 && iw.ix < nx - 1
          && iw.iy >= 0 && iw.iy < ny - 1
          && iw.iz >= 0 && iw.iz < nz - 1){
        // distances from the cell corner
        const double dxi = x - (x0 + iw.ix * dx);
        const double dyi = y - (y0 + iw.iy * dy);
        const double dzi = z - (z0 + iw.iz * dz);

        // trilinear weights (sum to 1 by construction)
        iw.w000 = (1.-dxi/dx) * (1.-dyi/dy) * (1.-dzi/dz);
        iw.w100 = (dxi/dx)    * (1.-dyi/dy) * (1.-dzi/dz);
        iw.w010 = (1.-dxi/dx) * (dyi/dy)    * (1.-dzi/dz);
        iw.w110 = (dxi/dx)    * (dyi/dy)    * (1.-dzi/dz);
        iw.w001 = (1.-dxi/dx) * (1.-dyi/dy) * (dzi/dz);
        iw.w101 = (dxi/dx)    * (1.-dyi/dy) * (dzi/dz);
        iw.w011 = (1.-dxi/dx) * (dyi/dy)    * (dzi/dz);
        iw.w111 = (dxi/dx)    * (dyi/dy)    * (dzi/dz);
    }
    else{
        // out of range: mark with sentinel indices
        iw.ix = -999;
        iw.iy = -999;
        iw.iz = -999;
    }

    return iw;
}

/*
 * Evaluate one scalar quantity stored on the 3D mesh at the point
 * described by iw. Returns 0 for out-of-range points (iw.ix < 0).
 * The map is laid out x-fastest: index = ix + iy*nx + iz*nx*ny
 * (established by the nx/nx*ny strides below).
 *
 * Fix: the w010 and w011 terms previously read "map[iw.ix+ + ...]";
 * the stray '+' parsed as a unary plus, i.e. plain iw.ix, which is the
 * correct index for those corners — the extra token is removed so the
 * expression no longer reads like a truncated "iw.ix+1".
 */
/*gpufun*/
double TriLinearInterpolatedFieldMap_interpolate_3d_map_scalar(
	/*gpuglmem*/ const double* map,
                     const IndicesAndWeights iw){

    double val;
    if (iw.ix < 0){
        val = 0.;
    }
    else{
        val = iw.w000 * map[iw.ix   + (iw.iy  ) * iw.nx + (iw.iz  ) * iw.nx * iw.ny]
            + iw.w100 * map[iw.ix+1 + (iw.iy  ) * iw.nx + (iw.iz  ) * iw.nx * iw.ny]
            + iw.w010 * map[iw.ix   + (iw.iy+1) * iw.nx + (iw.iz  ) * iw.nx * iw.ny]
            + iw.w110 * map[iw.ix+1 + (iw.iy+1) * iw.nx + (iw.iz  ) * iw.nx * iw.ny]
            + iw.w001 * map[iw.ix   + (iw.iy  ) * iw.nx + (iw.iz+1) * iw.nx * iw.ny]
            + iw.w101 * map[iw.ix+1 + (iw.iy  ) * iw.nx + (iw.iz+1) * iw.nx * iw.ny]
            + iw.w011 * map[iw.ix   + (iw.iy+1) * iw.nx + (iw.iz+1) * iw.nx * iw.ny]
            + iw.w111 * map[iw.ix+1 + (iw.iy+1) * iw.nx + (iw.iz+1) * iw.nx * iw.ny];
    }
    return val;
}

/*
 * Interpolate n_quantities mesh-defined quantities at n_points query
 * points. Quantity iq starts at byte offset offsets_mesh_quantities[iq]
 * inside buffer_mesh_quantities; results are written to
 * particles_quantities[iq*n_points + pidx].
 *
 * NOTE: the "//only_for_context", "//vectorize_over" and
 * "//end_vectorize" comments are directives for the xsuite code
 * generator and must not be reformatted or removed.
 */
/*gpukern*/
void TriLinearInterpolatedFieldMap_interpolate_3d_map_vector(
	         TriLinearInterpolatedFieldMapData fmap,
	         const int64_t n_points,
    /*gpuglmem*/ const double* x,
    /*gpuglmem*/ const double* y,
    /*gpuglmem*/ const double* z,
	         const int64_t n_quantities,
    /*gpuglmem*/ const int8_t* buffer_mesh_quantities,
    /*gpuglmem*/ const int64_t* offsets_mesh_quantities,
    /*gpuglmem*/ double* particles_quantities) {

    #pragma omp parallel for //only_for_context cpu_openmp
    for (int pidx=0; pidx<n_points; pidx++){ //vectorize_over pidx n_points
        const IndicesAndWeights iw =
            TriLinearInterpolatedFieldMap_compute_indeces_and_weights(
                fmap, x[pidx], y[pidx], z[pidx]);
        for (int iq=0; iq<n_quantities; iq++){
            particles_quantities[iq*n_points + pidx] =
                TriLinearInterpolatedFieldMap_interpolate_3d_map_scalar(
                    (/*gpuglmem*/ double*)(buffer_mesh_quantities
                                           + offsets_mesh_quantities[iq]),
                    iw);
        }
    }//end_vectorize
}

#endif
testocean.c
/*
 * Jacobi-style ocean-current relaxation benchmark.
 * NOTE(review): this file appears to be the output of a source-to-source
 * OpenMP tool (note the _imopVarPre* three-address temporaries and the
 * "dummyFlush" annotation comments) — presumably IMOP; keep the statement
 * order and annotations intact when editing.
 */

/* flattened macOS libc declarations (instead of the usual #includes) */
extern double fabs(double );
typedef int __int32_t;
typedef long long __int64_t;
typedef long unsigned int __darwin_size_t;
typedef long __darwin_time_t;
typedef __int64_t __darwin_off_t;
typedef __int32_t __darwin_suseconds_t;
typedef __darwin_size_t size_t;
struct timeval {
    __darwin_time_t tv_sec;
    __darwin_suseconds_t tv_usec;
} ;
void exit(int );
void *malloc(size_t __size);
int strcmp(const char *__s1, const char *__s2);
extern int omp_get_thread_num(void );
typedef __darwin_off_t fpos_t;
struct __sbuf {
    unsigned char *_base;
    int _size;
} ;
struct __sFILEX ;
/* layout of Darwin's FILE, reproduced so FILE* can be used without <stdio.h> */
struct __sFILE {
    unsigned char *_p;
    int _r;
    int _w;
    short _flags;
    short _file;
    struct __sbuf _bf;
    int _lbfsize;
    void *_cookie;
    int ( *_close )(void *);
    int ( *_read )(void *, char * , int );
    fpos_t ( *_seek )(void *, fpos_t , int );
    int ( *_write )(void *, const char * , int );
    struct __sbuf _ub;
    struct __sFILEX *_extra;
    int _ur;
    unsigned char _ubuf[3];
    unsigned char _nbuf[1];
    struct __sbuf _lb;
    int _blksize;
    fpos_t _offset;
} ;
typedef struct __sFILE FILE;
extern FILE *__stderrp;
int fprintf(FILE *restrict , const char *restrict , ...);
int printf(const char *restrict , ...);
int scanf(const char *restrict , ...);
int sscanf(const char *restrict , const char *restrict , ...);
int gettimeofday(struct timeval *restrict , void *restrict );

/* Return the smaller of a and b. */
int min(int a, int b) {
    int _imopVarPre142;
    int _imopVarPre143;
    _imopVarPre142 = a <= b;
    if (_imopVarPre142) {
        _imopVarPre143 = a;
    } else {
        _imopVarPre143 = b;
    }
    return _imopVarPre143;
}

/*
 * Serial 5-point Jacobi relaxation on the n x n grid A until the mean
 * absolute update falls below tol. Returns the iteration count.
 * NOTE(review): A and B are swapped locally each sweep, so after an odd
 * number of iterations the final values live in the B allocation, which
 * the caller never sees — verify against main()'s use of A.
 * NOTE(review): the rows of B are never freed (leak by design in this
 * benchmark).
 */
int simulate_ocean_currents(double **A, int n , double tol) {
    int done = 0;
    double diff;
    double old;
    int iter = 0;
    double **B;
    double **C;
    unsigned long int _imopVarPre146;
    void *_imopVarPre147;
    _imopVarPre146 = n * sizeof(double *);
    _imopVarPre147 = malloc(_imopVarPre146);
    B = (double **) _imopVarPre147;
    int k;
    /* B starts as a deep copy of A */
    for (k = 0; k < n; k++) {
        unsigned long int _imopVarPre150;
        void *_imopVarPre151;
        _imopVarPre150 = n * sizeof(double);
        _imopVarPre151 = malloc(_imopVarPre150);
        B[k] = (double *) _imopVarPre151;
        double *_imopVarPre159;
        unsigned int _imopVarPre160;
        unsigned long int _imopVarPre161;
        double *_imopVarPre162;
        double *_imopVarPre163;
        _imopVarPre159 = B[k];
        _imopVarPre160 = __builtin_object_size(_imopVarPre159, 0);
        _imopVarPre161 = n * sizeof(double);
        _imopVarPre162 = A[k];
        _imopVarPre163 = B[k];
        __builtin___memcpy_chk(_imopVarPre163, _imopVarPre162, _imopVarPre161, _imopVarPre160);
    }
    while (!done) {
        iter++;
        diff = 0;
        int i;
        int j;
        /* relax interior points only; the boundary stays fixed */
        for (i = 1; i < n - 1; ++i) {
            for (j = 1; j < n - 1; ++j) {
                old = A[i][j];
                B[i][j] = (A[i][j] + A[i][j - 1] + A[i - 1][j] + A[i][j + 1] + A[i + 1][j]) / 5.0;
                double _imopVarPre165;
                double _imopVarPre166;
                _imopVarPre165 = B[i][j] - old;
                _imopVarPre166 = fabs(_imopVarPre165);
                diff += _imopVarPre166;
            }
        }
        /* swap grids for the next sweep */
        C = A;
        A = B;
        B = C;
        if (diff / (n * n) < tol) {
            done = 1;
        }
    }
    return iter;
}

/*
 * Parallel variant: `procs` threads each own a contiguous band of rows.
 * The first parallel region builds the copy B; the second iterates,
 * accumulating per-thread residuals into `diff` via an atomic add and
 * synchronizing each sweep with barriers.
 */
int simulate_ocean_currents_parallel(double **A, int dim , double tol , int procs) {
    double **B;
    double **C;
    int chunk = 1 + (dim - 3) / procs;
    int done = 0;
    int iter = 0;
    double diff = 0;
#pragma omp parallel num_threads(procs) shared(A, B, dim)
    {
        void *_imopVarPre170;
        unsigned long int _imopVarPre169;
#pragma omp master
        {
            /* only the master allocates the row-pointer array */
            _imopVarPre169 = dim * sizeof(double *);
            _imopVarPre170 = malloc(_imopVarPre169);
            B = (double **) _imopVarPre170;
        }
        // #pragma omp dummyFlush BARRIER_START written([globalCell]) read([globalCell])
#pragma omp barrier
        int _imopVarPre171;
        _imopVarPre171 = omp_get_thread_num();
        int tid = _imopVarPre171;
        int _imopVarPre173;
        int _imopVarPre174;
        _imopVarPre173 = tid * dim / procs;
        _imopVarPre174 = min(dim, _imopVarPre173);
        int start = _imopVarPre174;
        int _imopVarPre176;
        int _imopVarPre177;
        _imopVarPre176 = (tid + 1) * dim / procs;
        _imopVarPre177 = min(dim, _imopVarPre176);
        int end = _imopVarPre177;
        int i;
        /* each thread copies its own band of rows A -> B */
        for (i = start; i < end; ++i) {
            unsigned long int _imopVarPre180;
            void *_imopVarPre181;
            _imopVarPre180 = dim * sizeof(double);
            _imopVarPre181 = malloc(_imopVarPre180);
            B[i] = (double *) _imopVarPre181;
            double *_imopVarPre189;
            unsigned int _imopVarPre190;
            unsigned long int _imopVarPre191;
            double *_imopVarPre192;
            double *_imopVarPre193;
            _imopVarPre189 = B[i];
            _imopVarPre190 = __builtin_object_size(_imopVarPre189, 0);
            _imopVarPre191 = dim * sizeof(double);
            _imopVarPre192 = A[i];
            _imopVarPre193 = B[i];
            __builtin___memcpy_chk(_imopVarPre193, _imopVarPre192, _imopVarPre191, _imopVarPre190);
        }
    }
    /* main relaxation region; `done` is firstprivate: each thread holds
     * its own copy, updated from the shared `diff` after the barrier */
#pragma omp parallel num_threads(procs) firstprivate(done)
    {
        int _imopVarPre194;
        _imopVarPre194 = omp_get_thread_num();
        int tid = _imopVarPre194;
        int _imopVarPre198;
        int _imopVarPre199;
        int _imopVarPre200;
        _imopVarPre198 = tid * chunk;
        _imopVarPre199 = dim - 2;
        _imopVarPre200 = min(_imopVarPre199, _imopVarPre198);
        int start = 1 + _imopVarPre200;
        int _imopVarPre204;
        int _imopVarPre205;
        int _imopVarPre206;
        _imopVarPre204 = (tid + 1) * chunk;
        _imopVarPre205 = dim - 2;
        _imopVarPre206 = min(_imopVarPre205, _imopVarPre204);
        int end = 1 + _imopVarPre206;
        double old;
        double mydiff;
        int i;
        int j;
        while (!done) {
            /*A nowait clause was added to this construct to make its barrier explicit.*/
#pragma omp single nowait
            {
                iter++;
            }
            diff = 0;
            // #pragma omp dummyFlush BARRIER_START written([globalCell]) read([globalCell])
#pragma omp barrier
            mydiff = 0;
            /* relax this thread's band of rows */
            for (i = start; i < end; ++i) {
                for (j = 1; j < dim - 1; ++j) {
                    old = A[i][j];
                    B[i][j] = (A[i][j] + A[i][j - 1] + A[i - 1][j] + A[i][j + 1] + A[i + 1][j]) / 5.0;
                    double _imopVarPre208;
                    double _imopVarPre209;
                    _imopVarPre208 = B[i][j] - old;
                    _imopVarPre209 = fabs(_imopVarPre208);
                    mydiff += _imopVarPre209;
                }
            }
            // #pragma omp dummyFlush ATOMIC_START written([globalCell]) read([diff])
#pragma omp atomic
            diff += mydiff;
            // #pragma omp dummyFlush ATOMIC_END written([diff]) read([])
            // #pragma omp dummyFlush BARRIER_START written([]) read([C, B, A, dim, diff, tol])
#pragma omp barrier
            /* every thread evaluates the same convergence test on the
             * shared diff, so all private `done` copies agree */
            done = diff / (dim * dim) < tol;
            /*A nowait clause was added to this construct to make its barrier explicit.*/
#pragma omp single nowait
            {
                C = A;
                A = B;
                B = C;
            }
            // #pragma omp dummyFlush BARRIER_START written([C, B, A]) read([globalCell])
            /*This explicit barrier was added as a replacement for some implicit barier.*/
#pragma omp barrier
        }
    }
    return iter;
}

/* Allocate an n x n matrix and fill it from stdin (row major). */
double **read_input(int n) {
    double **X;
    unsigned long int _imopVarPre212;
    void *_imopVarPre213;
    _imopVarPre212 = n * sizeof(double *);
    _imopVarPre213 = malloc(_imopVarPre212);
    X = (double **) _imopVarPre213;
    int i;
    int j;
    for (i = 0; i < n; ++i) {
        unsigned long int _imopVarPre216;
        void *_imopVarPre217;
        _imopVarPre216 = n * sizeof(double);
        _imopVarPre217 = malloc(_imopVarPre216);
        X[i] = (double *) _imopVarPre217;
        for (j = 0; j < n; ++j) {
            double *_imopVarPre219;
            _imopVarPre219 = &X[i][j];
            scanf("%lf", _imopVarPre219);
        }
    }
    return X;
}

/* Print the iteration count followed by the full grid. */
void print_output(double **A, int n , int niter) {
    printf("Number of iterations = %d\n", niter);
    int i;
    int j;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            double _imopVarPre221;
            _imopVarPre221 = A[i][j];
            printf("%lf ", _imopVarPre221);
        }
        printf("\n");
    }
    printf("\n");
}

/* Report wall-clock start/end/elapsed times in seconds. */
void print_statistics(struct timeval start_time, struct timeval end_time) {
    double _imopVarPre223;
    _imopVarPre223 = start_time.tv_sec + (start_time.tv_usec / 1000000.0);
    printf("Start time:\t%lf \n", _imopVarPre223);
    double _imopVarPre225;
    _imopVarPre225 = end_time.tv_sec + (end_time.tv_usec / 1000000.0);
    printf("End time:\t%lf\n", _imopVarPre225);
    double _imopVarPre227;
    _imopVarPre227 = end_time.tv_sec - start_time.tv_sec + ((end_time.tv_usec - start_time.tv_usec) / 1000000.0);
    printf("Total time: \t%lf (s)\n", _imopVarPre227);
}

/* Print the usage line to stderr and terminate with status 1. */
void print_usage_and_exit(char *prog) {
    fprintf(__stderrp, "Usage: %s <nprocs> <tol> <-serial|-parallel>\n", prog);
    exit(1);
}

/*
 * Entry point: parse <nprocs> <tol> <-serial|-parallel>, read the grid
 * dimension and contents from stdin, run the chosen solver, and print
 * the result plus timing statistics.
 */
int main(int argc, char **argv) {
    struct timeval start_time;
    struct timeval end_time;
    int num_iter = 0;
    double tol;
    double **A;
    int procs;
    int dim;
    if (argc != 4) {
        char *_imopVarPre229;
        _imopVarPre229 = argv[0];
        print_usage_and_exit(_imopVarPre229);
    }
    int *_imopVarPre232;
    char *_imopVarPre233;
    _imopVarPre232 = &procs;
    _imopVarPre233 = argv[1];
sscanf(_imopVarPre233, "%d", _imopVarPre232); double *_imopVarPre236; char *_imopVarPre237; _imopVarPre236 = &tol; _imopVarPre237 = argv[2]; sscanf(_imopVarPre237, "%lf", _imopVarPre236); char *option = argv[3]; int _imopVarPre238; int _imopVarPre248; int _imopVarPre249; int _imopVarPre250; _imopVarPre238 = option == ((void *) 0); if (!_imopVarPre238) { _imopVarPre248 = strcmp(option, "-serial"); _imopVarPre249 = _imopVarPre248 != 0; if (_imopVarPre249) { _imopVarPre250 = strcmp(option, "-parallel"); _imopVarPre249 = _imopVarPre250 != 0; } _imopVarPre238 = _imopVarPre249; } if (_imopVarPre238) { char *_imopVarPre252; _imopVarPre252 = argv[0]; print_usage_and_exit(_imopVarPre252); } printf("Options: Procs = %d, Tol = %lf, Execution%s\n\n", procs, tol, option); int *_imopVarPre254; _imopVarPre254 = &dim; scanf("%d", _imopVarPre254); A = read_input(dim); void *_imopVarPre257; struct timeval *_imopVarPre258; _imopVarPre257 = ((void *) 0); _imopVarPre258 = &start_time; gettimeofday(_imopVarPre258, _imopVarPre257); int _imopVarPre260; _imopVarPre260 = strcmp(option, "-serial"); if (_imopVarPre260 == 0) { num_iter = simulate_ocean_currents(A, dim, tol); } else { num_iter = simulate_ocean_currents_parallel(A, dim, tol, procs); } void *_imopVarPre263; struct timeval *_imopVarPre264; _imopVarPre263 = ((void *) 0); _imopVarPre264 = &end_time; gettimeofday(_imopVarPre264, _imopVarPre263); print_output(A, dim, num_iter); print_statistics(start_time, end_time); }
LPfold.c
/*
 * local pair probabilities for RNA secondary structures
 *
 * Stephan Bernhart, Ivo L Hofacker
 * Vienna RNA package
 */

/*
 * todo: compute energy z-score for each window
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */

#include "ViennaRNA/datastructures/basic.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/plotting/probabilities.h"
#include "ViennaRNA/part_func.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/LPfold.h"
#include "ViennaRNA/Lfold.h"
#include "ViennaRNA/alphabet.h"
#include "ViennaRNA/part_func_window.h"

/*
 #################################
 # GLOBAL VARIABLES              #
 #################################
 */

/*
 * State threaded through the default (backward-compatibility) callbacks:
 * either prints probabilities to file handles or accumulates them into
 * the bpp / pU containers returned to the caller.
 */
typedef struct {
  int           bpp_print; /* 1 if pairing probabilities should be written to file-handle, 0 if they are returned as vrna_ep_t */
  int           up_print;  /* 1 if unpaired probabilities should be written to file-handle, 0 if they are returned as array */
  FILE          *fp_pU;
  double        **pU;
  FLT_OR_DBL    bpp_cutoff;
  FILE          *fp_bpp;
  vrna_ep_t     *bpp;
  unsigned int  bpp_max_size;
  unsigned int  bpp_size;
  vrna_ep_t     *stack_prob;
  unsigned int  stack_prob_size;
  unsigned int  stack_prob_max_size;
} default_cb_data;

/*
 * Rotating per-window work arrays used while sliding over the sequence:
 * prml/prm_l/prm_l1 hold multiloop probability terms, pU* the unpaired
 * probabilities (overall and split by loop context).
 */
typedef struct {
  FLT_OR_DBL  *prml;
  FLT_OR_DBL  *prm_l;
  FLT_OR_DBL  *prm_l1;
  double      **pU;
  double      **pUO;
  double      **pUI;
  double      **pUM;
  double      **pUH;
} helper_arrays;

/* soft constraint contributions function (interior-loops) */
typedef FLT_OR_DBL (sc_int)(vrna_fold_compound_t *, int, int, int, int);

/* QI5 contribution function for unpaired probability computations */
typedef void (add_QI5)(FLT_OR_DBL **, int, int, FLT_OR_DBL, FLT_OR_DBL);

/*
 #################################
 # PRIVATE VARIABLES             #
 #################################
 */

#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY

#ifdef _OPENMP
#include <omp.h>
#endif

/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t  *backward_compat_compound = NULL;
PRIVATE int                   backward_compat           = 0;

#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif

#endif

/*
 #################################
 # PRIVATE FUNCTION DECLARATIONS #
 #################################
 */
PRIVATE void alloc_helper_arrays(vrna_fold_compound_t *vc, int ulength, helper_arrays *aux_arrays, unsigned int options);

PRIVATE void free_helper_arrays(vrna_fold_compound_t *vc, int ulength, helper_arrays *aux_arrays, unsigned int options);

PRIVATE void compute_probs(vrna_fold_compound_t *vc, int j, helper_arrays *aux_arrays, int ulength, vrna_probs_window_callback *cb, void *data, unsigned int options, int *ov);

PRIVATE void make_ptypes(vrna_fold_compound_t *vc, int j);

PRIVATE void probability_correction(vrna_fold_compound_t *vc, int i);

#if 0
PRIVATE vrna_ep_t *get_deppp(vrna_fold_compound_t *vc, vrna_ep_t *pl, int start);
#endif

PRIVATE void compute_pU(vrna_fold_compound_t *vc, int k, int ulength, helper_arrays *aux_arrays, vrna_probs_window_callback *cb, void *data, unsigned int options);

PRIVATE FLT_OR_DBL * compute_stack_probabilities(vrna_fold_compound_t *vc, int start);

PRIVATE void return_pU(int size, int i, int max_size, helper_arrays *aux_arrays, vrna_probs_window_callback *cb, void *data, unsigned int options);

PRIVATE void print_bpp_callback(FLT_OR_DBL *pr, int size, int k, void *data);

PRIVATE void store_bpp_callback(FLT_OR_DBL *pr, int size, int k, void *data);

#if 0
PRIVATE void store_stack_prob_callback(FLT_OR_DBL *pr, int size, int k, void *data);
#endif

PRIVATE void print_pU_callback(double *pU, int size, int k, int ulength, unsigned int type, void *data);

PRIVATE void store_pU_callback(double *pU, int size, int k, int ulength, unsigned int type, void *data);

PRIVATE void backward_compat_callback(FLT_OR_DBL *pr, int pr_size, int i, int max, unsigned int type, void *data);

PRIVATE FLT_OR_DBL
sc_contribution(vrna_fold_compound_t *vc, int i, int j, int k, int l); PRIVATE FLT_OR_DBL sc_dummy(vrna_fold_compound_t *vc, int i, int j, int k, int l); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PUBLIC vrna_ep_t * vrna_pfl_fold(const char *sequence, int window_size, int max_bp_span, float cutoff) { default_cb_data data; data.fp_pU = NULL; data.pU = NULL; data.bpp_cutoff = (FLT_OR_DBL)cutoff; data.fp_bpp = NULL; data.bpp = NULL; data.bpp_max_size = 0; data.bpp_size = 0; data.stack_prob = NULL; data.stack_prob_max_size = 0; data.stack_prob_size = 0; data.bpp_print = 0; data.up_print = 0; vrna_pfl_fold_cb(sequence, window_size, max_bp_span, &backward_compat_callback, (void *)&data); /* resize pair probability list to actual size */ data.bpp = (vrna_ep_t *)vrna_realloc(data.bpp, sizeof(vrna_ep_t) * (data.bpp_size + 1)); data.bpp[data.bpp_size].i = 0; data.bpp[data.bpp_size].j = 0; data.bpp[data.bpp_size].type = VRNA_PLIST_TYPE_BASEPAIR; data.bpp[data.bpp_size].p = 0; return data.bpp; } PUBLIC double ** vrna_pfl_fold_up(const char *sequence, int ulength, int window_size, int max_bp_span) { unsigned int i; double **pU; default_cb_data data; pU = NULL; if (sequence) { i = strlen(sequence); pU = (double **)vrna_alloc(sizeof(double *) * (i + 2)); data.fp_pU = NULL; data.pU = pU; data.bpp_cutoff = 0.; data.fp_bpp = NULL; data.bpp = NULL; data.bpp_max_size = 0; data.bpp_size = 0; data.stack_prob = NULL; data.stack_prob_max_size = 0; data.stack_prob_size = 0; data.bpp_print = 0; data.up_print = 0; vrna_pfl_fold_up_cb(sequence, ulength, window_size, max_bp_span, &backward_compat_callback, (void *)&data); } return pU; } PRIVATE void alloc_helper_arrays(vrna_fold_compound_t *vc, int ulength, helper_arrays *aux_arrays, unsigned int options) { int i, n; n = vc->length; aux_arrays->pU = NULL; aux_arrays->pUO = NULL; aux_arrays->pUH = NULL; aux_arrays->pUI = NULL; aux_arrays->pUM = NULL; aux_arrays->prm_l = 
(FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2));
  aux_arrays->prm_l1  = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2));
  aux_arrays->prml    = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2));

  if ((options & VRNA_PROBS_WINDOW_UP) && (ulength > 0)) {
    /* each row holds unpaired probabilities for stretch lengths up to
     * max(MAXLOOP, ulength) */
    aux_arrays->pU = (double **)vrna_alloc((n + 1) * sizeof(double *));
    for (i = 1; i <= n; i++)
      aux_arrays->pU[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double));

    if (options & VRNA_PROBS_WINDOW_UP_SPLIT) {
      /* separate matrices per loop context (exterior/interior/multi/hairpin) */
      aux_arrays->pUO = (double **)vrna_alloc((n + 1) * sizeof(double *));
      aux_arrays->pUI = (double **)vrna_alloc((n + 1) * sizeof(double *));
      aux_arrays->pUM = (double **)vrna_alloc((n + 1) * sizeof(double *));
      aux_arrays->pUH = (double **)vrna_alloc((n + 1) * sizeof(double *));
      for (i = 1; i <= n; i++) {
        aux_arrays->pUH[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double));
        aux_arrays->pUI[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double));
        aux_arrays->pUO[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double));
        aux_arrays->pUM[i] = (double *)vrna_alloc((MAX2(MAXLOOP, ulength) + 2) * sizeof(double));
      }
    }
  }
}

/* Release everything allocated by alloc_helper_arrays() (same option flags
 * must be passed so the conditional allocations match). */
PRIVATE void
free_helper_arrays(vrna_fold_compound_t *vc, int ulength, helper_arrays *aux_arrays, unsigned int options)
{
  int i, n;

  n = vc->length;

  free(aux_arrays->prm_l);
  free(aux_arrays->prm_l1);
  free(aux_arrays->prml);

  if ((options & VRNA_PROBS_WINDOW_UP) && (ulength > 0)) {
    for (i = 1; i <= n; i++)
      free(aux_arrays->pU[i]);
    free(aux_arrays->pU);

    if (options & VRNA_PROBS_WINDOW_UP_SPLIT) {
      for (i = 1; i <= n; i++) {
        free(aux_arrays->pUH[i]);
        free(aux_arrays->pUI[i]);
        free(aux_arrays->pUO[i]);
        free(aux_arrays->pUM[i]);
      }
      free(aux_arrays->pUH);
      free(aux_arrays->pUI);
      free(aux_arrays->pUO);
      free(aux_arrays->pUM);
    }
  }
}

/*
 * Hand the unpaired probabilities for position i to the user callback —
 * either one call per loop context (SPLIT) or a single call covering
 * any loop type.
 */
PRIVATE void
return_pU(int size, int i, int max_size, helper_arrays *aux_arrays, vrna_probs_window_callback *cb, void *data, unsigned int options)
{
  if (options & VRNA_PROBS_WINDOW_UP_SPLIT) {
    cb(aux_arrays->pUO[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_EXT_LOOP, data);
    cb(aux_arrays->pUH[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_HP_LOOP, data);
    cb(aux_arrays->pUI[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_INT_LOOP, data);
    cb(aux_arrays->pUM[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_MB_LOOP, data);
  } else {
    cb(aux_arrays->pU[i], size, i, max_size, VRNA_PROBS_WINDOW_UP | VRNA_ANY_LOOP, data);
  }
}

/*
 * Allocate row i of the sliding-window DP matrices. Most rows are
 * shifted ("ptr -= i") so they can be indexed directly with the
 * absolute column j instead of j - i.
 */
PRIVATE INLINE void
allocate_dp_matrices(vrna_fold_compound_t *vc, int i, unsigned int options)
{
  char          **ptype;
  int           winSize;
  FLT_OR_DBL    **pR, **q, **qb, **qm, **qm2, **QI5, **qmb, **q2l;
  vrna_mx_pf_t  *mx;
  vrna_hc_t     *hc;
  vrna_sc_t     *sc;

  mx      = vc->exp_matrices;
  pR      = mx->pR;
  q       = mx->q_local;
  qb      = mx->qb_local;
  qm      = mx->qm_local;
  qm2     = mx->qm2_local;
  QI5     = mx->QI5;
  qmb     = mx->qmb;
  q2l     = mx->q2l;
  ptype   = vc->ptype_local;
  winSize = vc->window_size;
  hc      = vc->hc;

  /* allocate new part of arrays */
  pR[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));
  pR[i] -= i;
  q[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));
  q[i] -= i;
  qb[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));
  qb[i] -= i;
  qm[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));
  qm[i] -= i;

  if (options & VRNA_PROBS_WINDOW_UP) {
    qm2[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));
    qm2[i] -= i;
    /* QI5/qmb/q2l are NOT shifted — indexed from 0 */
    QI5[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));
    qmb[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));
    q2l[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));
  }

  hc->matrix_local[i] = (unsigned char *)vrna_alloc((winSize + 1) * sizeof(unsigned char));

  ptype[i] = (char *)vrna_alloc((winSize + 1) * sizeof(char));
  ptype[i] -= i;

  switch (vc->type) {
    case VRNA_FC_TYPE_SINGLE:
      sc = vc->sc;
      if (sc) {
        if (sc->exp_energy_bp_local)
          sc->exp_energy_bp_local[i] = (FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));

        if (sc->exp_energy_up)
          sc->exp_energy_up[i] =
(FLT_OR_DBL *)vrna_alloc((winSize + 1) * sizeof(FLT_OR_DBL));

        vrna_sc_update(vc, i, VRNA_OPTION_PF | VRNA_OPTION_WINDOW);
      }

      break;

    case VRNA_FC_TYPE_COMPARATIVE:
      break;
  }
}

/* Free the DP rows still alive at the end of a run (the trailing
 * winSize + MAXLOOP rows that were never rotated out). */
PRIVATE INLINE void
free_dp_matrices(vrna_fold_compound_t *vc, unsigned int options)
{
  char          **ptype;
  int           i, n, winSize;
  FLT_OR_DBL    **pR, **q, **qb, **qm, **qm2, **QI5, **qmb, **q2l;
  vrna_mx_pf_t  *mx;
  vrna_hc_t     *hc;
  vrna_sc_t     *sc;

  n       = (int)vc->length;
  winSize = vc->window_size;
  mx      = vc->exp_matrices;
  pR      = mx->pR;
  q       = mx->q_local;
  qb      = mx->qb_local;
  qm      = mx->qm_local;
  ptype   = vc->ptype_local;
  hc      = vc->hc;
  sc      = vc->sc;

  for (i = MAX2(1, n - (winSize + MAXLOOP)); i <= n; i++) {
    /* undo the "-= i" shift before freeing */
    free(pR[i] + i);
    free(q[i] + i);
    free(qb[i] + i);
    free(qm[i] + i);
    pR[i] = NULL;
    q[i] = NULL;
    qb[i] = NULL;
    qm[i] = NULL;

    if (options & VRNA_PROBS_WINDOW_UP) {
      qm2 = mx->qm2_local;
      QI5 = mx->QI5;
      qmb = mx->qmb;
      q2l = mx->q2l;
      free(qm2[i] + i);
      free(QI5[i]);
      free(qmb[i]);
      free(q2l[i]);
      qm2[i] = NULL;
      QI5[i] = NULL;
      qmb[i] = NULL;
      q2l[i] = NULL;
    }

    free(hc->matrix_local[i]);
    hc->matrix_local[i] = NULL;
    free(ptype[i] + i);
    ptype[i] = NULL;

    if (sc) {
      if (sc->exp_energy_up)
        free(sc->exp_energy_up[i]);

      if (sc->exp_energy_bp_local)
        free(sc->exp_energy_bp_local[i]);
    }
  }
}

/* Allocate the initial set of DP rows (the first 2*winSize + MAXLOOP + 2
 * positions, clipped to the sequence length). */
PRIVATE INLINE void
init_dp_matrices(vrna_fold_compound_t *vc, unsigned int options)
{
  int j, max_j, winSize;

  winSize = vc->window_size;
  max_j   = MIN2((int)vc->length, 2 * winSize + MAXLOOP + 2);

  for (j = 1; j <= max_j; j++)
    allocate_dp_matrices(vc, j, options);
}

/*
 * Slide the DP window forward: once column j has passed far enough, the
 * oldest row i = j - (2*winSize + MAXLOOP + 1) can no longer be reached
 * and is freed; a fresh row is allocated for position j + 1.
 */
PRIVATE INLINE void
rotate_dp_matrices(vrna_fold_compound_t *vc, int j, unsigned int options)
{
  char          **ptype;
  int           i, winSize, length;
  FLT_OR_DBL    **pR, **q, **qb, **qm, **qm2, **QI5, **qmb, **q2l;
  vrna_mx_pf_t  *mx;
  vrna_hc_t     *hc;
  vrna_sc_t     *sc;

  length  = vc->length;
  winSize = vc->window_size;
  mx      = vc->exp_matrices;
  pR      = mx->pR;
  q       = mx->q_local;
  qb      = mx->qb_local;
  qm      = mx->qm_local;
  ptype   = vc->ptype_local;
  hc      = vc->hc;
  sc      = vc->sc;

  if (j > 2 * winSize + MAXLOOP + 1) {
    i = j - (2 * winSize + MAXLOOP + 1);
    /* free arrays may be faster than pointer rotation and reset to 0 values */
    free(pR[i] + i);
    free(q[i] + i);
    free(qb[i] + i);
    free(qm[i] + i);
    pR[i] = NULL;
    q[i] = NULL;
    qb[i] = NULL;
    qm[i] = NULL;

    if (options & VRNA_PROBS_WINDOW_UP) {
      qm2 = mx->qm2_local;
      QI5 = mx->QI5;
      qmb = mx->qmb;
      q2l = mx->q2l;
      free(qm2[i] + i);
      free(QI5[i]);
      free(qmb[i]);
      free(q2l[i]);
      qm2[i] = NULL;
      QI5[i] = NULL;
      qmb[i] = NULL;
      q2l[i] = NULL;
    }

    free(hc->matrix_local[i]);
    hc->matrix_local[i] = NULL;
    free(ptype[i] + i);
    ptype[i] = NULL;

    if (sc) {
      if (sc->exp_energy_up) {
        free(sc->exp_energy_up[i]);
        sc->exp_energy_up[i] = NULL;
      }

      if (sc->exp_energy_bp_local) {
        free(sc->exp_energy_bp_local[i]);
        sc->exp_energy_bp_local[i] = NULL;
      }
    }

    if (j + 1 <= length) /* get arrays for next round */
      allocate_dp_matrices(vc, j + 1, options);
  }
}

/* Prepare pair types and hard/soft constraints for the initial window. */
PRIVATE INLINE void
init_constraints(vrna_fold_compound_t *fc, unsigned int options)
{
  int j, max_j, winSize;

  winSize = fc->window_size;
  max_j   = MIN2((int)fc->length, 2 * winSize + MAXLOOP + 2);

  for (j = 1; j <= max_j; j++) {
    make_ptypes(fc, j);
    vrna_hc_update(fc, j);
    vrna_sc_update(fc, j, VRNA_OPTION_PF | VRNA_OPTION_WINDOW);
  }
}

/* Extend pair types and constraints to the next column as the window slides. */
PRIVATE INLINE void
rotate_constraints(vrna_fold_compound_t *fc, int j, unsigned int options)
{
  if (j + 1 <= fc->length) {
    make_ptypes(fc, j + 1);
    vrna_hc_update(fc, j + 1);
    vrna_sc_update(fc, j + 1, VRNA_OPTION_PF | VRNA_OPTION_WINDOW);
  }
}

/*
 * Main driver: compute sliding-window partition functions and report,
 * via cb(…, data), whatever the `options` bit mask requests — base-pair
 * probabilities (BPP), stacking probabilities (STACKP), unpaired
 * probabilities for stretches up to `ulength` (UP / UP_SPLIT), and/or
 * window ensemble free energies (PF).
 * Returns 1 on success, 0 on failure (bad arguments, preparation
 * failure, or partition-function overflow).
 */
PUBLIC int
vrna_probs_window(vrna_fold_compound_t *vc, int ulength, unsigned int options, vrna_probs_window_callback *cb, void *data)
{
  unsigned char         hc_decompose;
  int                   n, i, j, k, maxl, ov, winSize, pairSize, turn;
  FLT_OR_DBL            temp, Qmax, qbt1, **q, **qb, **qm, **qm2, **pR;
  double                max_real, *Fwindow;
  vrna_exp_param_t      *pf_params;
  vrna_md_t             *md;
  vrna_mx_pf_t          *matrices;
  vrna_hc_t             *hc;
  helper_arrays         aux_arrays;
  vrna_mx_pf_aux_el_t   aux_mx_el;
  vrna_mx_pf_aux_ml_t   aux_mx_ml;

  ov    = 0;
  Qmax  = 0;

  if ((!vc) || (!cb))
    return 0; /* failure */

  if (!vrna_fold_compound_prepare(vc, VRNA_OPTION_PF | VRNA_OPTION_WINDOW)) {
vrna_message_warning("vrna_probs_window: " "Failed to prepare vrna_fold_compound"); return 0; /* failure */ } /* here space for initializing everything */ n = vc->length; pf_params = vc->exp_params; md = &(pf_params->model_details); matrices = vc->exp_matrices; winSize = vc->window_size; pairSize = md->max_bp_span; turn = md->min_loop_size; q = matrices->q_local; qb = matrices->qb_local; qm = matrices->qm_local; qm2 = matrices->qm2_local; pR = matrices->pR; hc = vc->hc; alloc_helper_arrays(vc, ulength, &aux_arrays, options); Fwindow = (options & VRNA_PROBS_WINDOW_PF) ? (double *)vrna_alloc(sizeof(double) * (winSize + 1)) : NULL; /* very short molecule ? */ if (n < turn + 2) { if ((options & VRNA_PROBS_WINDOW_UP) && (ulength > 0)) { for (i = 1; i <= n; i++) { maxl = MIN2(MAX2(MAXLOOP, ulength), n); if (options & VRNA_PROBS_WINDOW_UP_SPLIT) { for (j = 0; j <= maxl; j++) { aux_arrays.pUO[i][j] = 1.; aux_arrays.pUH[i][j] = 0.; aux_arrays.pUI[i][j] = 0.; aux_arrays.pUM[i][j] = 0.; } } else { for (j = 0; j <= maxl; j++) aux_arrays.pU[i][j] = 1.; } return_pU(maxl, i, ulength, &aux_arrays, cb, data, options); } } free_helper_arrays(vc, ulength, &aux_arrays, options); return 1; /* success */ } init_dp_matrices(vc, options); init_constraints(vc, options); /* init auxiliary arrays for fast exterior/multibranch loops */ aux_mx_el = vrna_exp_E_ext_fast_init(vc); aux_mx_ml = vrna_exp_E_ml_fast_init(vc); max_real = (sizeof(FLT_OR_DBL) == sizeof(float)) ? 
FLT_MAX : DBL_MAX; /* start recursions */ for (j = turn + 2; j <= n + winSize; j++) { if (j <= n) { vrna_exp_E_ext_fast_update(vc, j, aux_mx_el); for (i = j - turn - 1; i >= MAX2(1, (j - winSize + 1)); i--) { hc_decompose = hc->matrix_local[i][j - i]; qbt1 = 0.; /* construction of partition function of segment i,j */ /* firstly that given i bound to j : qb(i,j) */ if (hc_decompose) { /* process hairpin loop(s) */ qbt1 += vrna_exp_E_hp_loop(vc, i, j); /* process interior loop(s) */ qbt1 += vrna_exp_E_int_loop(vc, i, j); /* process multibranch loop(s) */ qbt1 += vrna_exp_E_mb_loop_fast(vc, i, j, aux_mx_ml); } qb[i][j] = qbt1; /* Multibranch loop */ qm[i][j] = vrna_exp_E_ml_fast(vc, i, j, aux_mx_ml); if ((options & VRNA_PROBS_WINDOW_UP) && (ulength > 0)) { /* new qm2 computation done here */ const FLT_OR_DBL *qqm = vrna_exp_E_ml_fast_qqm(aux_mx_ml); temp = 0.0; for (k = i + 1; k <= j; k++) temp += qm[i][k - 1] * qqm[k]; qm2[i][j] = temp; } /* Exterior loop */ q[i][j] = temp = vrna_exp_E_ext_fast(vc, i, j, aux_mx_el); if (temp > Qmax) { Qmax = temp; if (Qmax > max_real / 10.) 
vrna_message_warning("vrna_probs_window: " "Q close to overflow: %d %d %g\n", i, j, temp); } if (temp >= max_real) { vrna_message_warning("vrna_probs_window: " "overflow while computing partition function for segment q[%d,%d]\n" "use larger pf_scale", i, j); vrna_exp_E_ml_fast_free(aux_mx_ml); vrna_exp_E_ext_fast_free(aux_mx_el); free_helper_arrays(vc, ulength, &aux_arrays, options); return 0; /* failure */ } } /* end for i */ /* * here we return the partition function for subsegments [i...j] in terms * of ensemble free energies G_ij = -RT * ln(Q_ij) in kcal/mol */ if (options & VRNA_PROBS_WINDOW_PF) { int start = MAX2(1, j - winSize + 1); Fwindow -= start; for (i = start; i <= j; i++) Fwindow[i] = (double)(-log(q[i][j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT / 1000.0; cb(Fwindow, j, start, winSize, VRNA_PROBS_WINDOW_PF, data); Fwindow += start; } /* * just as a general service, I save here the free energy of the windows * no output is generated, however,... */ if ((j >= winSize) && (options & VRNA_PROBS_WINDOW_UP)) { FLT_OR_DBL eee = 0.; eee = (FLT_OR_DBL)(-log(q[j - winSize + 1][j]) - winSize * log(pf_params->pf_scale)) * pf_params->kT / 1000.0; /* we could return this to the user via callback cb() if we were nice */ aux_arrays.pU[j][0] = eee; } /* rotate auxiliary arrays */ vrna_exp_E_ext_fast_rotate(aux_mx_el); vrna_exp_E_ml_fast_rotate(aux_mx_ml); } if (j > winSize) { compute_probs(vc, j, &aux_arrays, ulength, cb, data, options, &ov); if ((options & VRNA_PROBS_WINDOW_UP) && (j > winSize + MAXLOOP + 1)) compute_pU(vc, j - winSize - MAXLOOP - 1, ulength, &aux_arrays, cb, data, options); if (j > 2 * winSize + MAXLOOP + 1) { int start = j - (2 * winSize + MAXLOOP + 1); probability_correction(vc, start); if (options & VRNA_PROBS_WINDOW_BPP) { cb(pR[start], MIN2(start + winSize, n), start, winSize, VRNA_PROBS_WINDOW_BPP, data); } if (options & VRNA_PROBS_WINDOW_STACKP) { int start = j - (2 * winSize - MAXLOOP); if (start > 1) { FLT_OR_DBL 
/*
 * NOTE(review): this span begins inside vrna_probs_window() -- whose head
 * lies above this chunk -- and ends inside the #if 0'd get_deppp() that
 * continues below; both partial pieces are reproduced verbatim.
 */
*stack_probs = compute_stack_probabilities(vc, start);
            /* shift base pointer so cb() may index entries relative to 'start' */
            stack_probs -= start + 1;
            cb(stack_probs, MIN2(n - start + turn, pairSize), start, winSize, VRNA_PROBS_WINDOW_STACKP, data);
            stack_probs += start + 1;
            free(stack_probs);
          }
        }

        rotate_dp_matrices(vc, j, options);
        rotate_constraints(vc, j, options);
      }
    } /* end if (do_backtrack) */
  }   /* end for j */

  /* finish output */
  if (options & VRNA_PROBS_WINDOW_UP)
    for (j = MAX2(1, n - MAXLOOP); j <= n; j++)
      compute_pU(vc, j, ulength, &aux_arrays, cb, data, options);

  for (j = MAX2(n - winSize - MAXLOOP, 1); j <= n; j++) {
    probability_correction(vc, j);
    if (options & VRNA_PROBS_WINDOW_BPP) {
      cb(pR[j], MIN2(j + winSize, n), j, winSize, VRNA_PROBS_WINDOW_BPP, data);
    }

    if ((options & VRNA_PROBS_WINDOW_STACKP) && j < n) {
      int start = j;
      if (start > 1) {
        FLT_OR_DBL *stack_probs = compute_stack_probabilities(vc, start);
        stack_probs -= start + 1;
        cb(stack_probs, MIN2(n - start + turn, pairSize), start, winSize, VRNA_PROBS_WINDOW_STACKP, data);
        stack_probs += start + 1;
        free(stack_probs);
      }
    }
  }

  if (ov > 0)
    vrna_message_warning("vrna_probs_window: "
                         "%d overflows occurred while backtracking;\n"
                         "you might try a smaller pf_scale than %g\n",
                         ov, pf_params->pf_scale);

  free_dp_matrices(vc, options);
  free_helper_arrays(vc, ulength, &aux_arrays, options);

  /* free memory occupied by auxiliary arrays for fast exterior/multibranch loops */
  vrna_exp_E_ml_fast_free(aux_mx_ml);
  vrna_exp_E_ext_fast_free(aux_mx_el);

  free(Fwindow);

  return 1; /* success */
}


/*
 * Soft-constraint Boltzmann factor for an interior loop closed by (i,j)
 * with inner pair (k,l): unpaired stretches, closing pair, direct stack
 * and the generic user callback are multiplied in.
 * Assumes vc->sc != NULL (callers dispatch to sc_dummy() otherwise).
 */
PRIVATE FLT_OR_DBL
sc_contribution(vrna_fold_compound_t *vc,
                int                  i,
                int                  j,
                int                  k,
                int                  l)
{
  FLT_OR_DBL  q;
  vrna_sc_t   *sc;

  q   = 1.;
  sc  = vc->sc;

  /* unpaired stretches between outer pair (i,j) and inner pair (k,l) */
  if (sc->exp_energy_up)
    q *= sc->exp_energy_up[i + 1][k - i - 1] * sc->exp_energy_up[l + 1][j - l - 1];

  /* closing base pair (i,j) */
  if (sc->exp_energy_bp_local)
    q *= sc->exp_energy_bp_local[i][j - i];

  /* stack contribution only when the two pairs stack directly onto each other */
  if ((sc->exp_energy_stack) && (i + 1 == k) && (l + 1 == j)) {
    q *= sc->exp_energy_stack[i] * sc->exp_energy_stack[k] * sc->exp_energy_stack[l] * sc->exp_energy_stack[j];
  }

  /* generic user callback */
  if (sc->f)
    q *= sc->f(i, j, k, l, VRNA_DECOMP_PAIR_IL, sc->data);

  return q;
}


/* no-op stand-in for sc_contribution() when no soft constraints are present */
PRIVATE FLT_OR_DBL
sc_dummy(vrna_fold_compound_t *vc,
         int                  i,
         int                  j,
         int                  k,
         int                  l)
{
  return 1.;
}


/* accumulate an interior-loop contribution into the QI5 unpaired-probability matrix */
PRIVATE void
add_QI5_contribution(FLT_OR_DBL **QI5,
                     int        i,
                     int        j,
                     FLT_OR_DBL q,
                     FLT_OR_DBL qkl)
{
  QI5[i][j] += q * qkl;
}


/* no-op stand-in for add_QI5_contribution() when unpaired probs are not requested */
PRIVATE void
add_QI5_dummy(FLT_OR_DBL  **QI5,
              int         i,
              int         j,
              FLT_OR_DBL  q,
              FLT_OR_DBL  qkl)
{
  return;
}


/*
 * Backtracking step of the sliding-window partition function: fills the
 * pair probability matrix pR[k][l] for the window position determined by
 * column j, accumulating exterior-loop, interior-loop and multibranch-loop
 * contributions.  When unpaired probabilities are requested, the QI5, qmb
 * and q2l accumulators are fed as well.  Numeric overflows are counted in
 * *ov and the affected entry is clamped.
 */
PRIVATE void
compute_probs(vrna_fold_compound_t        *vc,
              int                         j,
              helper_arrays               *aux_arrays,
              int                         ulength,
              vrna_probs_window_callback  *cb,
              void                        *data,
              unsigned int                options,
              int                         *ov)
{
  char              **ptype;
  short             *S1;
  int               start_i, i, k, l, n, m, winSize, turn, type, type_2, tt, *rtype;
  FLT_OR_DBL        *prml, *prm_l, *prm_l1, **pR, **QI5, **qmb, **q2l, **qb, **q, **qm,
                    *scale, *expMLbase, expMLclosing, temp, prm_MLb, prmt1, prmt, *tmp, Qmax;
  double            max_real;
  vrna_exp_param_t  *pf_params;
  vrna_md_t         *md;
  vrna_hc_t         *hc;
  vrna_sc_t         *sc;
  sc_int            *sc_int_f;
  add_QI5           *add_QI5_f;

  max_real      = (sizeof(FLT_OR_DBL) == sizeof(float)) ? FLT_MAX : DBL_MAX;
  prml          = aux_arrays->prml;
  prm_l         = aux_arrays->prm_l;
  prm_l1        = aux_arrays->prm_l1;
  n             = vc->length;
  winSize       = vc->window_size;
  S1            = vc->sequence_encoding;
  ptype         = vc->ptype_local;
  pf_params     = vc->exp_params;
  md            = &(pf_params->model_details);
  turn          = md->min_loop_size;
  rtype         = &(md->rtype[0]);
  expMLclosing  = pf_params->expMLclosing;
  scale         = vc->exp_matrices->scale;
  expMLbase     = vc->exp_matrices->expMLbase;
  hc            = vc->hc;
  sc            = vc->sc;
  pR            = vc->exp_matrices->pR;
  QI5           = vc->exp_matrices->QI5;
  qmb           = vc->exp_matrices->qmb;
  q2l           = vc->exp_matrices->q2l;
  q             = vc->exp_matrices->q_local;
  qb            = vc->exp_matrices->qb_local;
  qm            = vc->exp_matrices->qm_local;
  Qmax          = 0;

  /* assign helper functions */
  if (sc)
    sc_int_f = &sc_contribution;
  else
    sc_int_f = &sc_dummy;

  if (options & VRNA_PROBS_WINDOW_UP)
    add_QI5_f = &add_QI5_contribution;
  else
    add_QI5_f = &add_QI5_dummy;

  /* start recursion */
  /* i=j-winSize; */
  /* initialize multiloopfs */
  for (k = j - winSize; k <= MIN2(n, j); k++) {
    prml[k]   = 0;
    prm_l[k]  = 0;
    /* prm_l1[k]=0; others stay */
  }
  k         = j - winSize;
  prm_l1[k] = 0;

  for (l = k + turn + 1; l <= MIN2(n, k + winSize - 1); l++) {
    int a;
    pR[k][l]  = 0; /* set zero at start */
    type      = vrna_get_ptype_window(k, l + k, ptype);
    if (qb[k][l] == 0)
      continue;

    /* Exterior loop cases */
    if (hc->matrix_local[k][l - k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) {
      for (a = MAX2(1, l - winSize + 2); a < MIN2(k, n - winSize + 2); a++)
        pR[k][l] += q[a][k - 1] * q[l + 1][a + winSize - 1] / q[a][a + winSize - 1];

      if (l - k + 1 == winSize) {
        pR[k][l] += 1. / q[k][l];
      } else {
        if (k + winSize - 1 <= n)     /* k outermost */
          pR[k][l] += q[l + 1][k + winSize - 1] / q[k][k + winSize - 1];

        if (l - winSize + 1 >= 1)     /* l outermost */
          pR[k][l] += q[l - winSize + 1][k - 1] / q[l - winSize + 1][l];
      }

      pR[k][l] *= exp_E_ExtLoop(type, (k > 1) ? S1[k - 1] : -1, (l < n) ? S1[l + 1] : -1, pf_params);
    }

    /* Interior loop cases: (k,l) enclosed by some outer pair (i,m) */
    if (hc->matrix_local[k][l - k] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) {
      FLT_OR_DBL ppp;
      type_2  = rtype[vrna_get_ptype_window(k, l + k, ptype)];
      ppp     = 0.;
      start_i = k - MAXLOOP - 1;
      if (start_i < l - winSize + 1)
        start_i = l - winSize + 1;

      if (start_i < 1)
        start_i = 1;

      int   u1 = 0;
      short sk1, sl1, si1;
      sk1 = S1[k - 1];
      sl1 = S1[l + 1];
      for (i = k - 1; i >= start_i; i--, u1++) {
        int max_m = i + winSize - 1;
        if (hc->up_int[i + 1] < u1)
          break;

        si1 = S1[i + 1];
        if (max_m > l + MAXLOOP - u1 + 1)
          max_m = l + MAXLOOP - u1 + 1;

        if (max_m > n)
          max_m = n;

        for (m = l + 1; m <= max_m; m++) {
          int u2 = m - l - 1;
          if (hc->up_int[l + 1] < u2)
            break;

          if (hc->matrix_local[i][m - i] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) {
            type = vrna_get_ptype_window(i, m + i, ptype);
            if (pR[i][m] > 0) {
              temp = pR[i][m] * exp_E_IntLoop(u1, u2, type, type_2, si1, S1[m - 1], sk1, sl1, pf_params) * sc_int_f(vc, i, m, k, l) * scale[u1 + u2 + 2];
              add_QI5_f(QI5, i, k - i - 1, temp, qb[k][l]);
              add_QI5_f(QI5, l, m - l - 1, temp, qb[k][l]);
              ppp += temp;
            }
          }
        }
      }
      pR[k][l] += ppp;
    }
  }

  /* 3. bonding k,l as substem of multi-loop enclosed by i,m */
  prm_MLb = 0.;
  if (k > 1) {
    /* "sonst nix!" -- otherwise nothing to do */
    for (l = MIN2(n - 1, k + winSize - 2); l >= k + turn + 1; l--) {
      FLT_OR_DBL ppp;
      /* opposite direction */
      m     = l + 1;
      prmt  = prmt1 = 0.0;
      for (i = MAX2(1, l - winSize + 2); i < k - 1 /* turn */; i++) {
        if (hc->matrix_local[i][m - i] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) {
          tt  = rtype[vrna_get_ptype_window(i, m + i, ptype)];
          ppp = pR[i][m] * exp_E_MLstem(tt, S1[m - 1], S1[i + 1], pf_params) * qm[i + 1][k - 1];
          if (sc)
            if (sc->exp_energy_bp_local)
              ppp *= sc->exp_energy_bp_local[i][m - i];

          prmt += ppp;
        }
      }
      prmt    *= expMLclosing;
      prml[m] = prmt;

      if (hc->matrix_local[k - 1][m - k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) {
        tt    = rtype[vrna_get_ptype_window(k - 1, m + k - 1, ptype)];
        prmt1 = pR[k - 1][m] * expMLclosing * exp_E_MLstem(tt, S1[l], S1[k], pf_params);
        if (sc)
          if (sc->exp_energy_bp_local)
            prmt1 *= sc->exp_energy_bp_local[k - 1][m - k + 1];
      }

      /* k-1 is unpaired */
      if (hc->up_ml[k - 1]) {
        ppp = prm_l1[m] * expMLbase[1];
        if (sc)
          if (sc->exp_energy_up)
            ppp *= sc->exp_energy_up[k - 1][1];

        prm_l[m] = ppp + prmt1;
      } else {
        /* skip configuration where k-1 is unpaired */
        prm_l[m] = prmt1;
      }

      /* m is unpaired */
      if (hc->up_ml[m]) {
        ppp = prm_MLb * expMLbase[1];
        if (sc)
          if (sc->exp_energy_up)
            ppp *= sc->exp_energy_up[m][1];

        prm_MLb = ppp + prml[m];
      } else {
        prm_MLb = prml[m];
      }

      /*
       * same as:    prm_MLb = 0;
       * for (i=n; i>k; i--)  prm_MLb += prml[i]*expMLbase[k-i-1];
       */
      prml[m] = prml[m] + prm_l[m];

      if (qb[k][l] == 0.)
        continue;

      if (hc->matrix_local[k][l - k] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) {
        tt = vrna_get_ptype_window(k, l + k, ptype);
        if (options & VRNA_PROBS_WINDOW_UP) {
          double dang;
          /* coefficient for computations of unpairedarrays */
          dang = qb[k][l] * exp_E_MLstem(tt, (k > 1) ? S1[k - 1] : -1, (l < n) ? S1[l + 1] : -1, pf_params) * scale[2];
          for (m = MIN2(k + winSize - 2, n); m >= l + 2; m--) {
            qmb[l][m - l - 1] += prml[m] * dang;
            q2l[l][m - l - 1] += (prml[m] - prm_l[m]) * dang;
          }
        }

        temp = prm_MLb;
        for (m = MIN2(k + winSize - 2, n); m >= l + 2; m--)
          temp += prml[m] * qm[l + 1][m - 1];

        temp      *= exp_E_MLstem(tt, (k > 1) ? S1[k - 1] : -1, (l < n) ? S1[l + 1] : -1, pf_params) * scale[2];
        pR[k][l]  += temp;
      }

      if (pR[k][l] > Qmax) {
        Qmax = pR[k][l];
        if (Qmax > max_real / 10.)
          vrna_message_warning("P close to overflow: %d %d %g %g\n", i, m, pR[k][l], qb[k][l]);
      }

      if (pR[k][l] >= max_real) {
        /* clamp overflowed entry; counted so the caller can warn the user */
        (*ov)++;
        pR[k][l] = FLT_MAX;
      }
    } /* end for (l=..) */
  }

  /* rotate the prm_l / prm_l1 helper rows for the next window position */
  tmp                 = prm_l1;
  aux_arrays->prm_l1  = prm_l;
  aux_arrays->prm_l   = tmp;
}


/*
 * Normalize pR[i][j] into actual pair probabilities: multiply by the pair
 * partition function qb[i][j] and divide by the number of windows in which
 * the pair (i,j) can occur.
 */
PRIVATE void
probability_correction(vrna_fold_compound_t *vc,
                       int                  i)
{
  int         j, howoften, pairdist, turn, n, winSize;
  FLT_OR_DBL  **qb, **pR;

  n         = vc->length;
  winSize   = vc->window_size;
  turn      = vc->exp_params->model_details.min_loop_size;
  howoften  = 0; /* how many samples do we have for this pair */
  qb        = vc->exp_matrices->qb_local;
  pR        = vc->exp_matrices->pR;

  for (j = i + turn; j < MIN2(i + winSize, n + 1); j++) {
    pairdist = (j - i + 1);
    /* 4cases */
    howoften  = MIN2(winSize - pairdist + 1, i);  /* pairdist,start */
    howoften  = MIN2(howoften, n - j + 1);        /* end */
    howoften  = MIN2(howoften, n - winSize + 1);  /* windowsize */
    pR[i][j]  *= qb[i][j] / howoften;
  }
  return;
}


/* fill row i of the local pair-type matrix for every j within the pair span */
PRIVATE void
make_ptypes(vrna_fold_compound_t *vc,
            int                  i)
{
  /* make new entries in ptype array */
  char        **ptype;
  const short *S;
  int         j, type, pairSize, n;
  vrna_md_t   *md;

  ptype     = vc->ptype_local;
  md        = &(vc->exp_params->model_details);
  pairSize  = md->max_bp_span;
  S         = vc->sequence_encoding2;
  n         = vc->length;

  for (j = i; j <= MIN2(i + pairSize, n); j++) {
    type        = md->pair[S[i]][S[j]];
    ptype[i][j] = (char)type;
  }
  return;
}


#if 0
/* dead code kept for reference (continued in the span below) */
PRIVATE vrna_ep_t *
get_deppp(vrna_fold_compound_t  *vc,
          vrna_ep_t             *pl,
          int                   start)
{
  /* compute dependent pair probabilities */
  int       i, j, count = 0;
  double    tmp;
  vrna_ep_t *temp;
  char      **ptype;
/* NOTE(review): continuation of the #if 0'd get_deppp() started above */
short             *S1;
  FLT_OR_DBL        **qb, *scale;
  int               *rtype, turn, pairsize, length;
  vrna_exp_param_t  *pf_params;

  S1        = vc->sequence_encoding;
  pf_params = vc->exp_params;
  ptype     = vc->ptype_local;
  qb        = vc->exp_matrices->qb_local;
  scale     = vc->exp_matrices->scale;
  rtype     = &(pf_params->model_details.rtype[0]);
  turn      = pf_params->model_details.min_loop_size;
  pairsize  = pf_params->model_details.max_bp_span;
  length    = vc->length;

  temp = (vrna_ep_t *)vrna_alloc(pairsize * sizeof(vrna_ep_t)); /* holds temporary deppp */
  for (j = start + turn; j < MIN2(start + pairsize, length); j++) {
    if ((qb[start][j] * qb[start - 1][(j + 1)]) > 10e-200) {
      int type    = ptype[start - 1][j + 1];
      int type_2  = rtype[(unsigned char)ptype[start][j]];
      tmp = qb[start][j] / qb[start - 1][(j + 1)] * exp_E_IntLoop(0, 0, type, type_2, S1[start], S1[j], S1[start - 1], S1[j + 1], pf_params) * scale[2];
      temp[count].i   = start;
      temp[count].j   = j;
      temp[count++].p = tmp;
    }
  }
  /* write it to list of deppps */
  for (i = 0; pl[i].i != 0; i++);
  pl = (vrna_ep_t *)vrna_realloc(pl, (i + count + 1) * sizeof(vrna_ep_t));
  for (j = 0; j < count; j++) {
    pl[i + j].i = temp[j].i;
    pl[i + j].j = temp[j].j;
    pl[i + j].p = temp[j].p;
  }
  /* terminating sentinel entry */
  pl[i + count].i = 0;
  pl[i + count].j = 0;
  pl[i + count].p = 0;
  free(temp);
  return pl;
}
#endif


/*
 * Probability that pair (start, j) stacks directly onto (start-1, j+1),
 * for all admissible j.  Returns a freshly allocated array indexed by
 * j - start - 1; ownership transfers to the caller (who must free it).
 */
PRIVATE FLT_OR_DBL *
compute_stack_probabilities(vrna_fold_compound_t *vc,
                            int                  start)
{
  /* compute dependent pair probabilities */
  char              **ptype;
  short             *S1;
  int               j, max_j, *rtype, turn, pairsize, length, type, type_2;
  FLT_OR_DBL        **qb, *scale, *probs;
  double            tmp;
  vrna_exp_param_t  *pf_params;

  length    = vc->length;
  S1        = vc->sequence_encoding;
  pf_params = vc->exp_params;
  ptype     = vc->ptype_local;
  qb        = vc->exp_matrices->qb_local;
  scale     = vc->exp_matrices->scale;
  rtype     = &(pf_params->model_details.rtype[0]);
  turn      = pf_params->model_details.min_loop_size;
  pairsize  = pf_params->model_details.max_bp_span;

  max_j = MIN2(start + pairsize, length) - 1;
  probs = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (max_j - start + 1));

  for (j = start + turn + 1; j <= max_j; j++) {
    /* both the inner and the enclosing pair must have non-vanishing weight */
    if ((qb[start][j] * qb[start - 1][(j + 1)]) > 10e-200) {
      type    = vrna_get_ptype_window(start - 1, j + 1 + start - 1, ptype);
      type_2  = rtype[vrna_get_ptype_window(start, j + start, ptype)];
      tmp = qb[start][j] / qb[start - 1][(j + 1)] * exp_E_IntLoop(0, 0, type, type_2, S1[start], S1[j], S1[start - 1], S1[j + 1], pf_params) * scale[2];
      probs[j - start - 1] = tmp;
    }
  }
  return probs;
}


/*
 * Here: Space for questions...
 */
/*
 * Unpaired probability computation for position k: accumulates, for every
 * stretch length up to ulength, the probability that the stretch ending at
 * k (resp. starting in the window) is unpaired -- split by enclosing loop
 * type when VRNA_PROBS_WINDOW_UP_SPLIT is requested.
 * (Body continues in the span below.)
 */
PRIVATE void
compute_pU(vrna_fold_compound_t        *vc,
           int                         k,
           int                         ulength,
           helper_arrays               *aux_arrays,
           vrna_probs_window_callback  *cb,
           void                        *data,
           unsigned int                options)
{
  /*
   * here, we try to add a function computing all unpaired probabilities starting at some i,
   * going down to $unpaired, to be unpaired, i.e. a list with entries from 1 to unpaired for
   * every i, with the probability of a stretch of length x, starting at i-x+1, to be unpaired
   */
  char              **ptype;
  short             *S1;
  int               startu, i5, j3, len, obp, *rtype, turn, winSize, n, leftmost, rightmost, tt;
  FLT_OR_DBL        expMLclosing, *expMLbase, **q, **qm, **qm2, *scale, **pR, **QI5, **q2l, **qmb;
  double            qqq, temp, *QBE, *QBI, *QBM, *QBH, **pU, **pUO, **pUH, **pUI, **pUM;
  vrna_exp_param_t  *pf_params;
  vrna_hc_t         *hc;
  vrna_sc_t         *sc;

  n             = vc->length;
  winSize       = vc->window_size;
  S1            = vc->sequence_encoding;
  pf_params     = vc->exp_params;
  ptype         = vc->ptype_local;
  rtype         = &(pf_params->model_details.rtype[0]);
  scale         = vc->exp_matrices->scale;
  q             = vc->exp_matrices->q_local;
  qm            = vc->exp_matrices->qm_local;
  qm2           = vc->exp_matrices->qm2_local;
  expMLbase     = vc->exp_matrices->expMLbase;
  expMLclosing  = pf_params->expMLclosing;
  pR            = vc->exp_matrices->pR;
  QI5           = vc->exp_matrices->QI5;
  q2l           = vc->exp_matrices->q2l;
  qmb           = vc->exp_matrices->qmb;
  turn          = pf_params->model_details.min_loop_size;
  hc            = vc->hc;
  sc            = vc->sc;
  pU            = aux_arrays->pU;
  pUO           = aux_arrays->pUO;
  pUH           = aux_arrays->pUH;
  pUI           = aux_arrays->pUI;
  pUM           = aux_arrays->pUM;

  /* per-length accumulators: total, multibranch, interior, hairpin */
  QBE = (double *)vrna_alloc((MAX2(ulength, MAXLOOP) + 2) * sizeof(double));
  QBM =
/* NOTE(review): continuation of compute_pU(), whose head is in the span above */
(double *)vrna_alloc((MAX2(ulength, MAXLOOP) + 2) * sizeof(double));
  QBI = (double *)vrna_alloc((MAX2(ulength, MAXLOOP) + 2) * sizeof(double));
  QBH = (double *)vrna_alloc((MAX2(ulength, MAXLOOP) + 2) * sizeof(double));

  /* first, we will */
  /* for k<=ulength, pU[k][k]=0, because no bp can enclose it */

  /* compute pu[k+ulength][ulength] */
  for (i5 = MAX2(k + ulength - winSize + 1, 1); i5 <= k; i5++) {
    for (j3 = k + ulength + 1; j3 <= MIN2(n, i5 + winSize - 1); j3++) {
      /* Multiloops */
      if (hc->matrix_local[i5][j3 - i5] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) {
        tt    = rtype[vrna_get_ptype_window(i5, j3 + i5, ptype)];
        temp  = 0.;

        /* new qm2 computation done here */
        /*
         * (.. >-----|..........)
         * i5 j   j+ulength   j3
         */
        /* (..{}{}-----|......) */
        if ((hc->up_ml[k + 1] >= j3 - k - 1) && (i5 < k)) {
          qqq = qm2[i5 + 1][k] * expMLbase[j3 - k - 1];
          if (sc) {
            if (sc->exp_energy_up)
              qqq *= sc->exp_energy_up[k + 1][j3 - k - 1];

            if (sc->f)
              qqq *= sc->f(i5, j3, i5 + 1, k, VRNA_DECOMP_PAIR_ML, sc->data);
          }

          temp += qqq;
        }

        /* (..|-----|{}{}) */
        if ((hc->up_ml[i5 + 1] >= k + ulength - i5) && (j3 - 1 > k + ulength)) {
          qqq = qm2[k + ulength + 1][j3 - 1] * expMLbase[k + ulength - i5];
          if (sc) {
            if (sc->exp_energy_up)
              qqq *= sc->exp_energy_up[i5 + 1][k + ulength - i5];

            if (sc->f)
              qqq *= sc->f(i5, j3, k + ulength + 1, j3, VRNA_DECOMP_PAIR_ML, sc->data);
          }

          temp += qqq;
        }

        /* ({}|-----|{}) */
        if ((hc->up_ml[k + 1] >= ulength) && (i5 < k) && (j3 - 1 > k + ulength)) {
          qqq = qm[i5 + 1][k] * qm[k + ulength + 1][j3 - 1] * expMLbase[ulength];
          if (sc) {
            if (sc->exp_energy_up)
              qqq *= sc->exp_energy_up[k + 1][ulength];

            if (sc->f)
              qqq *= sc->f(i5, j3, k, k + ulength + 1, VRNA_DECOMP_PAIR_ML_OUTSIDE, sc->data);
          }

          temp += qqq;
        }

        /* add dangles, multloopclosing etc. */
        qqq = exp_E_MLstem(tt, S1[j3 - 1], S1[i5 + 1], pf_params) * scale[2] * expMLclosing;
        if (sc)
          if (sc->exp_energy_bp_local)
            qqq *= sc->exp_energy_bp_local[i5][j3 - i5];

        temp *= qqq;
        pU[k + ulength][ulength] += temp * pR[i5][j3];
        if (options & VRNA_PROBS_WINDOW_UP_SPLIT)
          pUM[k + ulength][ulength] += temp * pR[i5][j3];
      }

      /* add hairpins */
      if (hc->matrix_local[i5][j3 - i5] & VRNA_CONSTRAINT_CONTEXT_HP_LOOP) {
        temp = vrna_exp_E_hp_loop(vc, i5, j3);
        pU[k + ulength][ulength] += temp * pR[i5][j3];
        if (options & VRNA_PROBS_WINDOW_UP_SPLIT)
          pUH[k + ulength][ulength] += temp * pR[i5][j3];
      }
    }
  }

  /* Add Interior loop contribution to QBE (and QBI) */
  temp = 0.;
  for (len = winSize; len > MAX2(ulength, MAXLOOP); len--)
    temp += QI5[k][len];
  for (; len > 0; len--) {
    temp += QI5[k][len];
    QBI[len]  += temp;
    QBE[len]  += temp;
  }

  /* Add Hairpin loop contribution to QBE (and QBH) */
  temp = 0.;
  for (obp = MIN2(n, k + winSize - 1); obp > k + ulength; obp--)
    temp += pR[k][obp] * vrna_exp_E_hp_loop(vc, k, obp);
  for (obp = MIN2(n, MIN2(k + winSize - 1, k + ulength)); obp > k + 1; obp--) {
    temp += pR[k][obp] * vrna_exp_E_hp_loop(vc, k, obp);
    QBH[obp - k - 1]  += temp;
    QBE[obp - k - 1]  += temp;
  }

  /*
   * Add up Multiloopterms  qmb[l][m]+=prml[m]*dang;
   * q2l[l][m]+=(prml[m]-prm_l[m])*dang;
   */
  temp = 0.;
  /* add (()()____) type cont. to I3 */
  if (sc && sc->exp_energy_up) {
    for (len = winSize; len >= ulength; len--)
      if (hc->up_ml[k + 1] >= len) {
        temp += q2l[k][len] * expMLbase[len] * sc->exp_energy_up[k + 1][len];
      }

    for (; len > 0; len--) {
      if (hc->up_ml[k + 1] >= len) {
        temp += q2l[k][len] * expMLbase[len] * sc->exp_energy_up[k + 1][len];
      }

      QBM[len]  += temp;
      QBE[len]  += temp;
    }
  } else {
    for (len = winSize; len >= ulength; len--)
      if (hc->up_ml[k + 1] >= len)
        temp += q2l[k][len] * expMLbase[len];

    for (; len > 0; len--) {
      if (hc->up_ml[k + 1] >= len)
        temp += q2l[k][len] * expMLbase[len];

      QBM[len]  += temp;
      QBE[len]  += temp;
    }
  }

  /* add (()___()) */
  for (len = 1; len < ulength; len++) {
    if (hc->up_ml[k + 1] >= len) {
      for (obp = k + len + turn; obp <= MIN2(n, k + winSize - 1); obp++) {
        temp = qmb[k][obp - k - 1] * qm[k + len + 1 /*2*/][obp - 1] * expMLbase[len];
        if (sc)
          if (sc->exp_energy_up)
            temp *= sc->exp_energy_up[k + 1][len];

        QBM[len]  += temp;
        QBE[len]  += temp;
      }
    }
  }

  /* add (___()()) */
  for (len = 1; len < ulength; len++) {
    if (hc->up_ml[k + 1] >= len) {
      for (obp = k + len + turn + turn; obp <= MIN2(n, k + winSize - 1); obp++) {
        if (hc->matrix_local[k][obp - k] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) {
          tt    = rtype[vrna_get_ptype_window(k, obp + k, ptype)];
          temp  = exp_E_MLstem(tt, S1[obp - 1], S1[k + 1], pf_params) * scale[2] * expMLbase[len] * expMLclosing * pR[k][obp] * qm2[k + len + 1][obp - 1]; /* k:obp */
          if (sc) {
            if (sc->exp_energy_up)
              temp *= sc->exp_energy_up[k + 1][len];

            /*
             * NOTE(review): the condition tests sc->exp_energy_bp but the
             * factor read is sc->exp_energy_bp_local -- every other branch
             * in this file tests exp_energy_bp_local; verify which member
             * is intended before relying on this path
             */
            if (sc->exp_energy_bp)
              temp *= sc->exp_energy_bp_local[k][obp - k];
          }

          QBM[len]  += temp;
          QBE[len]  += temp;
        }
      }
    }
  }

  /*
   * After computing all these contributions in QBE[len], that k is paired
   * and the unpaired stretch is AT LEAST len long, we start to add that to
   * the old unpaired thingies;
   */
  for (len = 1; len <= MIN2(MAX2(ulength, MAXLOOP), n - k); len++)
    pU[k + len][len] += pU[k + len][len + 1] + QBE[len];
  if (options & VRNA_PROBS_WINDOW_UP_SPLIT) {
    for (len = 1; len <= MIN2(MAX2(ulength, MAXLOOP), n - k); len++) {
      pUH[k + len][len] += pUH[k + len][len + 1] + QBH[len];
      pUM[k + len][len] += pUM[k + len][len + 1] + QBM[len];
      pUI[k + len][len] += pUI[k + len][len + 1] + QBI[len];
    }
    /* open chain */
    if ((ulength >= winSize) && (k >= ulength) && (hc->up_ext[k - winSize + 1] >= winSize))
      pUO[k][winSize] = scale[winSize] / q[k - winSize + 1][k];
  }

  /* open chain */
  if ((ulength >= winSize) && (k >= ulength) && (hc->up_ext[k - winSize + 1] >= winSize)) {
    if (sc && sc->exp_energy_up) {
      pU[k][winSize] = scale[winSize] * sc->exp_energy_up[k][winSize] / q[k - winSize + 1][k];
    } else {
      pU[k][winSize] = scale[winSize] / q[k - winSize + 1][k];
    }
  }

  /*
   * now the not enclosed by any base pair terms for whatever it is we do not need anymore...
   * ... which should be e.g; k, again
   */
  for (startu = MIN2(ulength, k); startu > 0; startu--) {
    temp = 0.;
    /* check whether soft constraint unpaired contributions available */
    if (sc && sc->exp_energy_up) {
      if (hc->up_ext[k - startu + 1] >= startu) {
        for (i5 = MAX2(1, k - winSize + 2); i5 <= MIN2(k - startu, n - winSize + 1); i5++)
          temp += q[i5][k - startu] * q[k + 1][i5 + winSize - 1] * scale[startu] * sc->exp_energy_up[k - startu + 1][startu] / q[i5][i5 + winSize - 1];

        /* the 2 Cases where the borders are on the edge of the interval */
        if ((k >= winSize) && (startu + 1 <= winSize)) {
          temp += q[k - winSize + 1][k - startu] * scale[startu] * sc->exp_energy_up[k - startu + 1][startu] / q[k - winSize + 1][k];
        }

        if ((k <= n - winSize + startu) && (k - startu >= 0) && (k < n) && (startu + 1 <= winSize)) {
          temp += q[k + 1][k - startu + winSize] * scale[startu] * sc->exp_energy_up[k - startu + 1][startu] / q[k - startu + 1][k - startu + winSize];
        }
      }
    } else {
      if (hc->up_ext[k - startu + 1] >= startu) {
        for (i5 = MAX2(1, k - winSize + 2); i5 <= MIN2(k - startu, n - winSize + 1); i5++)
          temp += q[i5][k - startu] * q[k + 1][i5 + winSize - 1] * scale[startu] / q[i5][i5 + winSize - 1];

        /* the 2 Cases where the borders are on the edge of the interval */
        if ((k >= winSize) && (startu + 1 <= winSize))
          temp += q[k - winSize + 1][k - startu] * scale[startu] / q[k - winSize + 1][k];

        if ((k <= n - winSize + startu) && (k - startu >= 0) && (k < n) && (startu + 1 <= winSize))
          temp += q[k + 1][k - startu + winSize] * scale[startu] / q[k - startu + 1][k - startu + winSize];
      }
    }

    /* Divide by number of possible windows */
    leftmost  = MAX2(1, k - winSize + 1);
    rightmost = MIN2(n - winSize + 1, k - startu + 1);
    pU[k][startu] += temp;
    pU[k][startu] /= (rightmost - leftmost + 1);
    if (options & VRNA_PROBS_WINDOW_UP_SPLIT) {
      pUO[k][startu] += temp;
      /* Do we want to make a distinction between those? */
      pUO[k][startu] /= (rightmost - leftmost + 1);
      pUH[k][startu] /= (rightmost - leftmost + 1);
      pUI[k][startu] /= (rightmost - leftmost + 1);
      pUM[k][startu] /= (rightmost - leftmost + 1);
    }
  }
  free(QBE);
  free(QBI);
  free(QBH);
  free(QBM);

  /* call return callback */
  return_pU(MIN2(ulength, k), k, ulength, aux_arrays, cb, data, options);
  return;
}


/* default callback: print base pair probabilities above the cutoff to a file */
PRIVATE void
print_bpp_callback(FLT_OR_DBL *pr,
                   int        size,
                   int        k,
                   void       *data)
{
  int         j;
  FILE        *fp     = ((default_cb_data *)data)->fp_bpp;
  FLT_OR_DBL  cutoff  = ((default_cb_data *)data)->bpp_cutoff;

  for (j = k + 1; j <= size; j++) {
    if (pr[j] < cutoff)
      continue;

    fprintf(fp, "%d %d %g\n", k, j, pr[j]);
  }
}


/*
 * Default callback: append base pair probabilities above the cutoff to a
 * growing vrna_ep_t list stored in the default_cb_data structure.
 * (Body continues in the span below.)
 */
PRIVATE void
store_bpp_callback(FLT_OR_DBL *pr,
                   int        size,
                   int        k,
                   void       *data)
{
  int           j;
  vrna_ep_t     *pl         = ((default_cb_data *)data)->bpp;
  unsigned int  pl_size     = ((default_cb_data *)data)->bpp_size;
  unsigned int  pl_max_size = ((default_cb_data *)data)->bpp_max_size;
  FLT_OR_DBL    cutoff      = ((default_cb_data *)data)->bpp_cutoff;

  if (pl_max_size == 0) {
    /* init if necessary */
    pl_max_size = 100;
    pl          = (vrna_ep_t *)vrna_realloc(pl, sizeof(vrna_ep_t) * pl_max_size);
  }

  for (j = k + 1; j <= size; j++) {
    if (pr[j] < cutoff)
      continue;

    /* resize vrna_ep_t memory if necessary */
    if (pl_size >= pl_max_size - 1) {
      pl_max_size *= 1.5;
      pl          = (vrna_ep_t *)vrna_realloc(pl, sizeof(vrna_ep_t) * pl_max_size);
    }

    pl[pl_size].i = k;
    pl[pl_size].j = j;
/* NOTE(review): continuation of store_bpp_callback() started above */
pl[pl_size].type  = VRNA_PLIST_TYPE_BASEPAIR;
    pl[pl_size++].p   = pr[j];
  }

  /* mark end of vrna_ep_t */
  pl[pl_size].i     = 0;
  pl[pl_size].j     = 0;
  pl[pl_size].type  = VRNA_PLIST_TYPE_BASEPAIR;
  pl[pl_size].p     = 0.;

  /* update data */
  ((default_cb_data *)data)->bpp          = pl;
  ((default_cb_data *)data)->bpp_size     = pl_size;
  ((default_cb_data *)data)->bpp_max_size = pl_max_size;
}


#if 0
/* dead code: storage callback for stacking probabilities (vrna_ep_t list) */
PRIVATE void
store_stack_prob_callback(FLT_OR_DBL  *pr,
                          int         size,
                          int         k,
                          void        *data)
{
  int           j;
  vrna_ep_t     *pl         = ((default_cb_data *)data)->stack_prob;
  unsigned int  pl_size     = ((default_cb_data *)data)->stack_prob_size;
  unsigned int  pl_max_size = ((default_cb_data *)data)->stack_prob_max_size;
  FLT_OR_DBL    cutoff      = ((default_cb_data *)data)->bpp_cutoff;

  if (pl_max_size == 0) {
    /* init if necessary */
    pl_max_size = 100;
    pl          = (vrna_ep_t *)vrna_realloc(pl, sizeof(vrna_ep_t) * pl_max_size);
  }

  for (j = k + 1; j <= size; j++) {
    if (pr[j] < cutoff)
      continue;

    /* resize vrna_ep_t memory if necessary */
    if (pl_size >= pl_max_size - 1) {
      pl_max_size *= 1.5;
      pl          = (vrna_ep_t *)vrna_realloc(pl, sizeof(vrna_ep_t) * pl_max_size);
    }

    pl[pl_size].i     = k;
    pl[pl_size].j     = j;
    pl[pl_size].type  = VRNA_PLIST_TYPE_BASEPAIR;
    pl[pl_size++].p   = pr[j];
  }

  /* mark end of vrna_ep_t */
  pl[pl_size].i     = 0;
  pl[pl_size].j     = 0;
  pl[pl_size].type  = VRNA_PLIST_TYPE_BASEPAIR;
  pl[pl_size].p     = 0.;

  /* update data */
  ((default_cb_data *)data)->stack_prob           = pl;
  ((default_cb_data *)data)->stack_prob_size      = pl_size;
  ((default_cb_data *)data)->stack_prob_max_size  = pl_max_size;
}
#endif


/* default callback: print one row of unpaired probabilities, tagged by loop type */
PRIVATE void
print_pU_callback(double        *pU,
                  int           size,
                  int           k,
                  int           ulength,
                  unsigned int  type,
                  void          *data)
{
  if (type & VRNA_PROBS_WINDOW_UP) {
    int   i;
    FILE  *fp = ((default_cb_data *)data)->fp_pU;

    fprintf(fp, "%d\t", k);
    for (i = 1; i < size; i++)
      fprintf(fp, "%.7g\t", pU[i]);
    fprintf(fp, "%.7g", pU[size]);

    /* trailing tag encodes which loop context the probabilities refer to */
    if ((type & VRNA_ANY_LOOP) == VRNA_ANY_LOOP)
      fprintf(fp, "\n");
    else if (type & VRNA_EXT_LOOP)
      fprintf(fp, "\tE\n");
    else if (type & VRNA_HP_LOOP)
      fprintf(fp, "\tH\n");
    else if (type & VRNA_INT_LOOP)
      fprintf(fp, "\tI\n");
    else if (type & VRNA_MB_LOOP)
      fprintf(fp, "\tM\n");
    else
      vrna_message_warning("unknown loop type");
  }
}


/* default callback: copy one row of unpaired probabilities into the user's pU storage */
PRIVATE void
store_pU_callback(double        *pU,
                  int           size,
                  int           k,
                  int           ulength,
                  unsigned int  type,
                  void          *data)
{
  int     i;
  double  **pU_storage = ((default_cb_data *)data)->pU;

  if ((type & VRNA_PROBS_WINDOW_UP) && ((type & VRNA_ANY_LOOP) == VRNA_ANY_LOOP)) {
    pU_storage[k] = (double *)vrna_alloc(sizeof(double) * (ulength + 1));
    for (i = 1; i <= size; i++)
      pU_storage[k][i] = pU[i];
  }
}


/* dispatch callback for the deprecated interface: print or store, bpp or pU */
PRIVATE void
backward_compat_callback(FLT_OR_DBL   *pr,
                         int          pr_size,
                         int          i,
                         int          max,
                         unsigned int type,
                         void         *data)
{
  default_cb_data *d = (default_cb_data *)data;

  if (type & VRNA_PROBS_WINDOW_BPP) {
    if (d->bpp_print)
      print_bpp_callback(pr, pr_size, i, data);
    else
      store_bpp_callback(pr, pr_size, i, data);
  } else if (type & VRNA_PROBS_WINDOW_UP) {
    if (d->up_print)
      print_pU_callback(pr, pr_size, i, max, type, data);
    else
      store_pU_callback(pr, pr_size, i, max, type, data);
  }
}


#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY

/*###########################################*/
/*# deprecated functions below              #*/
/*###########################################*/

/*
 * Common worker behind pfl_fold()/pfl_fold_par(): builds a fold compound
 * from the deprecated-style arguments, wires up the default callbacks and
 * runs vrna_probs_window().  (Body continues in the span below.)
 */
PRIVATE vrna_ep_t *
wrap_pf_foldLP(char             *sequence,
               int              winSize,
               int              pairSize,
               float            cutoffb,
               double           **pU,
               vrna_ep_t        **dpp2,
               FILE             *pUfp,
               FILE             *spup,
               vrna_exp_param_t *parameters)
{
  int                   ulength, r;
  vrna_fold_compound_t  *vc;
  vrna_md_t             md;
  default_cb_data       data;

  vc      = NULL;
  ulength = 0;

  /*
   * if present, extract model details from provided parameters variable,
   * to properly initialize the fold compound.
Otherwise use default
   * settings taken from deprecated global variables
   */
  /* NOTE(review): this span continues wrap_pf_foldLP(), whose head is above */
  if (parameters)
    vrna_md_copy(&md, &(parameters->model_details));
  else
    set_model_details(&md);

  md.compute_bpp  = 1;        /* turn on base pair probability computations */
  md.window_size  = winSize;  /* set size of sliding window */
  md.max_bp_span  = pairSize; /* set maximum base pair span */

  vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT | VRNA_OPTION_WINDOW);

  /*
   * if present, attach a copy of the parameters structure instead of the
   * default parameters but take care of re-setting it to (initialized)
   * model details
   */
  free(vc->exp_params);
  if (parameters) {
    vrna_md_copy(&(parameters->model_details), &(vc->params->model_details));
    vc->exp_params = vrna_exp_params_copy(parameters);
  } else {
    vc->exp_params = vrna_exp_params(&(vc->params->model_details));
  }

  /* propagate global pf_scale into vc->exp_params */
  vc->exp_params->pf_scale = pf_scale;

  if (backward_compat_compound && backward_compat)
    vrna_fold_compound_free(backward_compat_compound);

  backward_compat_compound  = vc;
  backward_compat           = 1;
  iindx                     = backward_compat_compound->iindx; /* for backward compatibility and Perl wrapper */

  /*
   * NOTE(review): the cast binds tighter than '+ 0.49', so the rounding
   * term is truncated away again on assignment to an int;
   * '(int)(pU[0][0] + 0.49)' was probably intended -- confirm before changing
   */
  if (pU)
    ulength = (int)pU[0][0] + 0.49;

  data.fp_pU                = pUfp;
  data.pU                   = pU;
  data.bpp_cutoff           = (FLT_OR_DBL)cutoffb;
  data.fp_bpp               = spup;
  data.bpp                  = NULL;
  data.bpp_max_size         = 0;
  data.bpp_size             = 0;
  data.stack_prob           = NULL;
  data.stack_prob_max_size  = 0;
  data.stack_prob_size      = 0;
  data.bpp_print            = (spup) ? 1 : 0;
  data.up_print             = (pUfp) ? 1 : 0;

  unsigned int options = VRNA_PROBS_WINDOW_BPP; /* always compute base pair probabilities */

  if (dpp2 && (*dpp2))
    options |= VRNA_PROBS_WINDOW_STACKP;

  if (ulength > 0)
    options |= VRNA_PROBS_WINDOW_UP;

  r = vrna_probs_window(vc, ulength, options, &backward_compat_callback, (void *)&data);

  if (!r)
    return NULL;

  /* terminate the stacking probability list and hand it back via *dpp2 */
  if (dpp2 && (*dpp2)) {
    data.stack_prob = (vrna_ep_t *)vrna_realloc(data.stack_prob, sizeof(vrna_ep_t) * (data.stack_prob_size + 1));
    data.stack_prob[data.stack_prob_size].i     = 0;
    data.stack_prob[data.stack_prob_size].j     = 0;
    data.stack_prob[data.stack_prob_size].type  = VRNA_PLIST_TYPE_BASEPAIR;
    data.stack_prob[data.stack_prob_size].p     = 0;
    free(*dpp2); /* free already occupied memory */
    *dpp2 = data.stack_prob;
  }

  if (!spup) {
    /* probabilities were stored, not printed: terminate and return the list */
    data.bpp = (vrna_ep_t *)vrna_realloc(data.bpp, sizeof(vrna_ep_t) * (data.bpp_size + 1));
    data.bpp[data.bpp_size].i     = 0;
    data.bpp[data.bpp_size].j     = 0;
    data.bpp[data.bpp_size].type  = VRNA_PLIST_TYPE_BASEPAIR;
    data.bpp[data.bpp_size].p     = 0;
    return data.bpp;
  } else {
    return NULL;
  }
}


/* deprecated: kept only for API compatibility */
PUBLIC void
init_pf_foldLP(int length)
{
  /* DO NOTHING */
}


/* deprecated: re-read global model settings into the saved fold compound */
PUBLIC void
update_pf_paramsLP(int length)
{
  if (backward_compat_compound && backward_compat) {
    vrna_md_t md;
    set_model_details(&md);
    vrna_exp_params_reset(backward_compat_compound, &md);

    /* compatibility with RNAup, may be removed sometime */
    pf_scale = backward_compat_compound->exp_params->pf_scale;
  }
}


/* deprecated: like update_pf_paramsLP() but with an explicit parameter set */
PUBLIC void
update_pf_paramsLP_par(int              length,
                       vrna_exp_param_t *parameters)
{
  if (backward_compat_compound && backward_compat) {
    vrna_md_t md;
    if (parameters) {
      vrna_exp_params_subst(backward_compat_compound, parameters);
    } else {
      set_model_details(&md);
      vrna_exp_params_reset(backward_compat_compound, &md);
    }

    /* compatibility with RNAup, may be removed sometime */
    pf_scale = backward_compat_compound->exp_params->pf_scale;
  }
}


/* deprecated entry point: sliding-window partition function, default parameters */
PUBLIC vrna_ep_t *
pfl_fold(char       *sequence,
         int        winSize,
         int        pairSize,
         float      cutoffb,
         double     **pU,
         vrna_ep_t  **dpp2,
         FILE       *pUfp,
         FILE       *spup)
{
  return wrap_pf_foldLP(sequence, winSize, pairSize, cutoffb, pU, dpp2, pUfp, spup, NULL);
}


/* deprecated entry point: sliding-window partition function, explicit parameters */
PUBLIC vrna_ep_t *
pfl_fold_par(char             *sequence,
             int              winSize,
             int              pairSize,
             float            cutoffb,
             double           **pU,
             vrna_ep_t        **dpp2,
             FILE             *pUfp,
             FILE             *spup,
             vrna_exp_param_t *parameters)
{
  return wrap_pf_foldLP(sequence, winSize, pairSize, cutoffb, pU, dpp2, pUfp, spup, parameters);
}


/* deprecated: write the pU table using the saved fold compound's parameters */
PUBLIC void
putoutpU_prob(double  **pU,
              int     length,
              int     ulength,
              FILE    *fp,
              int     energies)
{
  if (backward_compat_compound && backward_compat)
    putoutpU_prob_par(pU, length, ulength, fp, energies, backward_compat_compound->exp_params);
  else
    vrna_message_warning("putoutpU_prob: Not doing anything! First, run pfl_fold()!");
}


/*
 * Write the pU table as text -- probabilities, or opening energies
 * -RT*ln(pU) when 'energies' is set.
 * NOTE: frees each pU[k] row as a side effect.
 */
PUBLIC void
putoutpU_prob_par(double            **pU,
                  int               length,
                  int               ulength,
                  FILE              *fp,
                  int               energies,
                  vrna_exp_param_t  *parameters)
{
  /* put out unpaireds */
  int     i, k;
  double  temp, kT = parameters->kT / 1000.0;

  if (energies)
    fprintf(fp, "#opening energies\n #i$\tl=");
  else
    fprintf(fp, "#unpaired probabilities\n #i$\tl=");

  for (i = 1; i <= ulength; i++)
    fprintf(fp, "%d\t", i);
  fprintf(fp, "\n");

  for (k = 1; k <= length; k++) {
    fprintf(fp, "%d\t", k);
    for (i = 1; i <= ulength; i++) {
      if (i > k) {
        fprintf(fp, "NA\t");
        continue;
      }

      if (energies)
        temp = -log(pU[k][i]) * kT;
      else
        temp = pU[k][i];

      fprintf(fp, "%.7g\t", temp);
    }
    fprintf(fp, "\n");
    free(pU[k]);
  }
  fflush(fp);
}


/* deprecated: binary pU output using the saved fold compound's parameters */
PUBLIC void
putoutpU_prob_bin(double  **pU,
                  int     length,
                  int     ulength,
                  FILE    *fp,
                  int     energies)
{
  if (backward_compat_compound && backward_compat)
    putoutpU_prob_bin_par(pU, length, ulength, fp, energies, backward_compat_compound->exp_params);
  else
    vrna_message_warning("putoutpU_prob_bin: Not doing anything! First, run pfl_fold()!");
}


/*
 * Write the pU table in the binary format consumed by RNAup-style tools;
 * the sentinel value 1000000 pads header fields and out-of-range cells.
 * NOTE: frees each pU[k] row as a side effect.
 */
PUBLIC void
putoutpU_prob_bin_par(double            **pU,
                      int               length,
                      int               ulength,
                      FILE              *fp,
                      int               energies,
                      vrna_exp_param_t  *parameters)
{
  /* put out unpaireds */
  int     i, k, *p;
  double  kT = parameters->kT / 1000.0;

  p = (int *)vrna_alloc(sizeof(int) * 1);
  /* write first line */
  p[0] = ulength; /* u length */
  fwrite(p, sizeof(int), 1, fp);
  p[0] = length;  /* seq length */
  fwrite(p, sizeof(int), 1, fp);
  for (k = 3; k <= (length + 20); k++) {
    /* all the other lines are set to 1000000 because we are at ulength=0 */
    p[0] = 1000000;
    fwrite(p, sizeof(int), 1, fp);
  }
  /* data */
  for (i = 1; i <= ulength; i++) {
    for (k = 1; k <= 11; k++) {
      /* write first ten entries to 1000000 */
      p[0] = 1000000;
      fwrite(p, sizeof(int), 1, fp);
    }
    for (k = 1; k <= length; k++) {
      /* write data now */
      if (i > k) {
        p[0] = 1000000; /* check if u > pos */
        fwrite(p, sizeof(int), 1, fp);
        continue;
      } else {
        p[0] = (int)rint(100 * (-log(pU[k][i]) * kT));
        fwrite(p, sizeof(int), 1, fp);
      }
    }
    for (k = 1; k <= 9; k++) {
      /* finish by writing the last 10 entries */
      p[0] = 1000000;
      fwrite(p, sizeof(int), 1, fp);
    }
  }
  /* free pU array; */
  for (k = 1; k <= length; k++)
    free(pU[k]);
  free(p);
  fflush(fp);
}
#endif
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/Specifiers.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
///
class alignas(void *) Stmt {
public:
  // Kind discriminator for every statement/expression node; the enumerators
  // are generated from the TableGen node definitions in StmtNodes.inc.
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant = FIRST##Class, last##BASE##Constant = LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
  first##BASE##Constant = FIRST##Class, last##BASE##Constant = LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  void *operator new(size_t bytes) noexcept {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }

  void operator delete(void *data) noexcept {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  //===--- Statement bitfields classes ---===//
  // Each *Bitfields class overlays the same storage (see the anonymous union
  // below); the leading anonymous `unsigned : NumStmtBits` skips the bits
  // owned by StmtBitfields.

  class StmtBitfields {
    friend class Stmt;

    /// The statement class.
    unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class NullStmtBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class NullStmt;

    unsigned : NumStmtBits;

    /// True if the null statement was preceded by an empty macro, e.g:
    /// @code
    ///   #define CALL(x)
    ///   CALL(0);
    /// @endcode
    unsigned HasLeadingEmptyMacro : 1;

    /// The location of the semi-colon.
    SourceLocation SemiLoc;
  };

  class CompoundStmtBitfields {
    friend class ASTStmtReader;
    friend class CompoundStmt;

    unsigned : NumStmtBits;

    // Number of statements in the compound body.
    unsigned NumStmts : 32 - NumStmtBits;

    /// The location of the opening "{".
    SourceLocation LBraceLoc;
  };

  class LabelStmtBitfields {
    friend class LabelStmt;

    unsigned : NumStmtBits;

    // The location of the label identifier.
    SourceLocation IdentLoc;
  };

  class AttributedStmtBitfields {
    friend class ASTStmtReader;
    friend class AttributedStmt;

    unsigned : NumStmtBits;

    /// Number of attributes.
    unsigned NumAttrs : 32 - NumStmtBits;

    /// The location of the attribute.
    SourceLocation AttrLoc;
  };

  class IfStmtBitfields {
    friend class ASTStmtReader;
    friend class IfStmt;

    unsigned : NumStmtBits;

    /// True if this if statement is a constexpr if.
    unsigned IsConstexpr : 1;

    /// True if this if statement has storage for an else statement.
    unsigned HasElse : 1;

    /// True if this if statement has storage for a variable declaration.
    unsigned HasVar : 1;

    /// True if this if statement has storage for an init statement.
    unsigned HasInit : 1;

    /// The location of the "if".
    SourceLocation IfLoc;
  };

  class SwitchStmtBitfields {
    friend class SwitchStmt;

    unsigned : NumStmtBits;

    /// True if the SwitchStmt has storage for an init statement.
    unsigned HasInit : 1;

    /// True if the SwitchStmt has storage for a condition variable.
    unsigned HasVar : 1;

    /// If the SwitchStmt is a switch on an enum value, records whether all
    /// the enum values were covered by CaseStmts. The coverage information
    /// value is meant to be a hint for possible clients.
    unsigned AllEnumCasesCovered : 1;

    /// The location of the "switch".
    SourceLocation SwitchLoc;
  };

  class WhileStmtBitfields {
    friend class ASTStmtReader;
    friend class WhileStmt;

    unsigned : NumStmtBits;

    /// True if the WhileStmt has storage for a condition variable.
    unsigned HasVar : 1;

    /// The location of the "while".
    SourceLocation WhileLoc;
  };

  class DoStmtBitfields {
    friend class DoStmt;

    unsigned : NumStmtBits;

    /// The location of the "do".
    SourceLocation DoLoc;
  };

  class ForStmtBitfields {
    friend class ForStmt;

    unsigned : NumStmtBits;

    /// The location of the "for".
    SourceLocation ForLoc;
  };

  class GotoStmtBitfields {
    friend class GotoStmt;
    friend class IndirectGotoStmt;

    unsigned : NumStmtBits;

    /// The location of the "goto".
    SourceLocation GotoLoc;
  };

  class ContinueStmtBitfields {
    friend class ContinueStmt;

    unsigned : NumStmtBits;

    /// The location of the "continue".
    SourceLocation ContinueLoc;
  };

  class BreakStmtBitfields {
    friend class BreakStmt;

    unsigned : NumStmtBits;

    /// The location of the "break".
    SourceLocation BreakLoc;
  };

  class ReturnStmtBitfields {
    friend class ReturnStmt;

    unsigned : NumStmtBits;

    /// True if this ReturnStmt has storage for an NRVO candidate.
    unsigned HasNRVOCandidate : 1;

    /// The location of the "return".
    SourceLocation RetLoc;
  };

  class SwitchCaseBitfields {
    friend class SwitchCase;
    friend class CaseStmt;

    unsigned : NumStmtBits;

    /// Used by CaseStmt to store whether it is a case statement
    /// of the form case LHS ... RHS (a GNU extension).
    unsigned CaseStmtIsGNURange : 1;

    /// The location of the "case" or "default" keyword.
    SourceLocation KeywordLoc;
  };

  //===--- Expression bitfields classes ---===//

  class ExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class AtomicExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class CallExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class CXXNewExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class DeclRefExpr; // computeDependence
    friend class DependentScopeDeclRefExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class Expr;
    friend class InitListExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class OpaqueValueExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class ShuffleVectorExpr; // ctor

    unsigned : NumStmtBits;

    unsigned ValueKind : 2;
    unsigned ObjectKind : 3;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  enum { NumExprBits = NumStmtBits + 9 };

  class PredefinedExprBitfields {
    friend class ASTStmtReader;
    friend class PredefinedExpr;

    unsigned : NumExprBits;

    /// The kind of this PredefinedExpr. One of the enumeration values
    /// in PredefinedExpr::IdentKind.
    unsigned Kind : 4;

    /// True if this PredefinedExpr has a trailing "StringLiteral *"
    /// for the predefined identifier.
unsigned HasFunctionName : 1;

    /// The location of this PredefinedExpr.
    SourceLocation Loc;
  };

  class DeclRefExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class DeclRefExpr;

    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;

    /// The location of the declaration name itself.
    SourceLocation Loc;
  };

  // Serializable encoding of the llvm::APFloat semantics used by a
  // FloatingLiteral (see FloatingLiteralBitfields::Semantics below).
  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class StringLiteralBitfields {
    friend class ASTStmtReader;
    friend class StringLiteral;

    unsigned : NumExprBits;

    /// The kind of this string literal.
    /// One of the enumeration values of StringLiteral::StringKind.
    unsigned Kind : 3;

    /// The width of a single character in bytes. Only values of 1, 2,
    /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
    /// the target + string kind to the appropriate CharByteWidth.
    unsigned CharByteWidth : 3;

    unsigned IsPascal : 1;

    /// The number of concatenated token this string is made of.
    /// This is the number of trailing SourceLocation.
    unsigned NumConcatenated;
  };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 3;
  };

  enum { NumBoundsCheckKindBits = 2 };

  class UnaryOperatorBitfields {
    friend class UnaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 5;
    unsigned CanOverflow : 1;
    unsigned BoundsCheckKind : NumBoundsCheckKindBits;

    SourceLocation Loc;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 3;
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class ArraySubscriptExprBitfields {
    friend class ArraySubscriptExpr;

    unsigned : NumExprBits;

    unsigned BoundsCheckKind : NumBoundsCheckKindBits;

    SourceLocation RBracketLoc;
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;

    /// True if the callee of the call expression was found using ADL.
    unsigned UsesADL : 1;

    /// Padding used to align OffsetToTrailingObjects to a byte multiple.
    unsigned : 24 - 2 - NumExprBits;

    /// The offset in bytes from the this pointer to the start of the
    /// trailing objects belonging to CallExpr. Intentionally byte sized
    /// for faster access.
    unsigned OffsetToTrailingObjects : 8;
  };
  enum { NumCallExprBits = 32 };

  class MemberExprBitfields {
    friend class MemberExpr;

    unsigned : NumExprBits;

    /// IsArrow - True if this is "X->F", false if this is "X.F".
    unsigned IsArrow : 1;

    /// True if this member expression used a nested-name-specifier to
    /// refer to the member, e.g., "x->Base::f", or found its member via
    /// a using declaration. When true, a MemberExprNameQualifier
    /// structure is allocated immediately after the MemberExpr.
    unsigned HasQualifierOrFoundDecl : 1;

    /// True if this member expression specified a template keyword
    /// and/or a template argument list explicitly, e.g., x->f<int>,
    /// x->template f, x->template f<int>.
    /// When true, an ASTTemplateKWAndArgsInfo structure and its
    /// TemplateArguments (if any) are present.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// True if this member expression refers to a method that
    /// was resolved from an overloaded set having size greater than 1.
    unsigned HadMultipleCandidates : 1;

    /// This is the location of the -> or . in the expression.
    SourceLocation OperatorLoc;
  };

  class CastExprBitfields {
    friend class CastExpr;
    friend class ImplicitCastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 6;
    unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
    unsigned BoundsSafeInterface : 1;

    /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
    /// here. ([implimits] Direct and indirect base classes [16384]).
    unsigned BasePathSize;
  };

  class BinaryOperatorBitfields {
    friend class BinaryOperator;

    unsigned : NumExprBits;

    unsigned Opc : 6;

    /// This is only meaningful for operations on floating point
    /// types and 0 otherwise.
    unsigned FPFeatures : 3;

    SourceLocation OpLoc;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class ParenListExprBitfields {
    friend class ASTStmtReader;
    friend class ParenListExpr;

    unsigned : NumExprBits;

    /// The number of expressions in the paren list.
    unsigned NumExprs;
  };

  class PseudoObjectExprBitfields {
    friend class ASTStmtReader; // deserialization
    friend class PseudoObjectExpr;

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  //===--- C++ Expression bitfields classes ---===//

  class CXXOperatorCallExprBitfields {
    friend class ASTStmtReader;
    friend class CXXOperatorCallExpr;

    unsigned : NumCallExprBits;

    /// The kind of this overloaded operator. One of the enumerator
    /// value of OverloadedOperatorKind.
    unsigned OperatorKind : 6;

    // Only meaningful for floating point types.
    unsigned FPFeatures : 3;
  };

  class CXXBoolLiteralExprBitfields {
    friend class CXXBoolLiteralExpr;

    unsigned : NumExprBits;

    /// The value of the boolean literal.
    unsigned Value : 1;

    /// The location of the boolean literal.
    SourceLocation Loc;
  };

  class CXXNullPtrLiteralExprBitfields {
    friend class CXXNullPtrLiteralExpr;

    unsigned : NumExprBits;

    /// The location of the null pointer literal.
SourceLocation Loc;
  };

  class CXXThisExprBitfields {
    friend class CXXThisExpr;

    unsigned : NumExprBits;

    /// Whether this is an implicit "this".
    unsigned IsImplicit : 1;

    /// The location of the "this".
    SourceLocation Loc;
  };

  class CXXThrowExprBitfields {
    friend class ASTStmtReader;
    friend class CXXThrowExpr;

    unsigned : NumExprBits;

    /// Whether the thrown variable (if any) is in scope.
    unsigned IsThrownVariableInScope : 1;

    /// The location of the "throw".
    SourceLocation ThrowLoc;
  };

  class CXXDefaultArgExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultArgExpr;

    unsigned : NumExprBits;

    /// The location where the default argument expression was used.
    SourceLocation Loc;
  };

  class CXXDefaultInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDefaultInitExpr;

    unsigned : NumExprBits;

    /// The location where the default initializer expression was used.
    SourceLocation Loc;
  };

  class CXXScalarValueInitExprBitfields {
    friend class ASTStmtReader;
    friend class CXXScalarValueInitExpr;

    unsigned : NumExprBits;

    SourceLocation RParenLoc;
  };

  class CXXNewExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class CXXNewExpr;

    unsigned : NumExprBits;

    /// Was the usage ::new, i.e. is the global new to be used?
    unsigned IsGlobalNew : 1;

    /// Do we allocate an array? If so, the first trailing "Stmt *" is the
    /// size expression.
    unsigned IsArray : 1;

    /// Should the alignment be passed to the allocation function?
    unsigned ShouldPassAlignment : 1;

    /// If this is an array allocation, does the usual deallocation
    /// function for the allocated type want to know the allocated size?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// What kind of initializer do we have? Could be none, parens, or braces.
    /// In storage, we distinguish between "none, and no initializer expr", and
    /// "none, but an implicit initializer expr".
    unsigned StoredInitializationStyle : 2;

    /// True if the allocated type was expressed as a parenthesized type-id.
    unsigned IsParenTypeId : 1;

    /// The number of placement new arguments.
    unsigned NumPlacementArgs;
  };

  class CXXDeleteExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDeleteExpr;

    unsigned : NumExprBits;

    /// Is this a forced global delete, i.e. "::delete"?
    unsigned GlobalDelete : 1;

    /// Is this the array form of delete, i.e. "delete[]"?
    unsigned ArrayForm : 1;

    /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
    /// applied to pointer-to-array type (ArrayFormAsWritten will be false
    /// while ArrayForm will be true).
    unsigned ArrayFormAsWritten : 1;

    /// Does the usual deallocation function for the element type require
    /// a size_t argument?
    unsigned UsualArrayDeleteWantsSize : 1;

    /// Location of the expression.
    SourceLocation Loc;
  };

  class TypeTraitExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class TypeTraitExpr;

    unsigned : NumExprBits;

    /// The kind of type trait, which is a value of a TypeTrait enumerator.
    unsigned Kind : 8;

    /// If this expression is not value-dependent, this indicates whether
    /// the trait evaluated true or false.
    unsigned Value : 1;

    /// The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  class DependentScopeDeclRefExprBitfields {
    friend class ASTStmtReader;
    friend class ASTStmtWriter;
    friend class DependentScopeDeclRefExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;
  };

  class CXXConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXConstructExpr;

    unsigned : NumExprBits;

    unsigned Elidable : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned ListInitialization : 1;
    unsigned StdInitListInitialization : 1;
    unsigned ZeroInitialization : 1;
    unsigned ConstructionKind : 3;

    SourceLocation Loc;
  };

  class ExprWithCleanupsBitfields {
    friend class ASTStmtReader; // deserialization
    friend class ExprWithCleanups;

    unsigned : NumExprBits;

    // When false, it must not have side effects.
    unsigned CleanupsHaveSideEffects : 1;

    unsigned NumObjects : 32 - 1 - NumExprBits;
  };

  class CXXUnresolvedConstructExprBitfields {
    friend class ASTStmtReader;
    friend class CXXUnresolvedConstructExpr;

    unsigned : NumExprBits;

    /// The number of arguments used to construct the type.
    unsigned NumArgs;
  };

  class CXXDependentScopeMemberExprBitfields {
    friend class ASTStmtReader;
    friend class CXXDependentScopeMemberExpr;

    unsigned : NumExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether this member expression has info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// See getFirstQualifierFoundInScope() and the comment listing
    /// the trailing objects.
    unsigned HasFirstQualifierFoundInScope : 1;

    /// The location of the '->' or '.' operator.
    SourceLocation OperatorLoc;
  };

  class OverloadExprBitfields {
    friend class ASTStmtReader;
    friend class OverloadExpr;

    unsigned : NumExprBits;

    /// Whether the name includes info for explicit template
    /// keyword and arguments.
    unsigned HasTemplateKWAndArgsInfo : 1;

    /// Padding used by the derived classes to store various bits. If you
    /// need to add some data here, shrink this padding and add your data
    /// above. NumOverloadExprBits also needs to be updated.
    unsigned : 32 - NumExprBits - 1;

    /// The number of results.
unsigned NumResults;
  };
  enum { NumOverloadExprBits = NumExprBits + 1 };

  class UnresolvedLookupExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedLookupExpr;

    unsigned : NumOverloadExprBits;

    /// True if these lookup results should be extended by
    /// argument-dependent lookup if this is the operand of a function call.
    unsigned RequiresADL : 1;

    /// True if these lookup results are overloaded. This is pretty trivially
    /// rederivable if we urgently need to kill this field.
    unsigned Overloaded : 1;
  };
  static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
                "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
                "avoid trashing OverloadExprBitfields::NumResults!");

  class UnresolvedMemberExprBitfields {
    friend class ASTStmtReader;
    friend class UnresolvedMemberExpr;

    unsigned : NumOverloadExprBits;

    /// Whether this member expression used the '->' operator or
    /// the '.' operator.
    unsigned IsArrow : 1;

    /// Whether the lookup results contain an unresolved using declaration.
    unsigned HasUnresolvedUsing : 1;
  };
  static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
                "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
                "avoid trashing OverloadExprBitfields::NumResults!");

  class CXXNoexceptExprBitfields {
    friend class ASTStmtReader;
    friend class CXXNoexceptExpr;

    unsigned : NumExprBits;

    unsigned Value : 1;
  };

  class SubstNonTypeTemplateParmExprBitfields {
    friend class ASTStmtReader;
    friend class SubstNonTypeTemplateParmExpr;

    unsigned : NumExprBits;

    /// The location of the non-type template parameter reference.
    SourceLocation NameLoc;
  };

  //===--- C++ Coroutines TS bitfields classes ---===//

  class CoawaitExprBitfields {
    friend class CoawaitExpr;

    unsigned : NumExprBits;

    unsigned IsImplicit : 1;
  };

  //===--- Obj-C Expression bitfields classes ---===//

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  //===--- Clang Extensions bitfields classes ---===//

  class OpaqueValueExprBitfields {
    friend class ASTStmtReader;
    friend class OpaqueValueExpr;

    unsigned : NumExprBits;

    /// The OVE is a unique semantic reference to its source expression if this
    /// bit is set to true.
    unsigned IsUnique : 1;

    SourceLocation Loc;
  };

  // NOTE(review): BoundsExpr/InteropTypeExpr bitfields below belong to the
  // Checked C extension nodes (see the Checked C scope machinery later in
  // this header).
  enum { NumBoundsExprKindBits = 3 };

  class BoundsExprBitfields {
    friend class BoundsExpr;

    unsigned : NumExprBits;

    unsigned Kind : NumBoundsExprKindBits;
    unsigned IsCompilerGenerated : 1;
  };

  enum { NumInteropTypeExprKindBits = 1 };

  class InteropTypeExprBitfields {
    friend class InteropTypeExpr;

    unsigned : NumExprBits;

    unsigned IsCompilerGenerated : 1;
  };

  // All of the *Bitfields classes overlay the same storage.
  union {
    // Same order as in StmtNodes.td.
// Statements
    StmtBitfields StmtBits;
    NullStmtBitfields NullStmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    LabelStmtBitfields LabelStmtBits;
    AttributedStmtBitfields AttributedStmtBits;
    IfStmtBitfields IfStmtBits;
    SwitchStmtBitfields SwitchStmtBits;
    WhileStmtBitfields WhileStmtBits;
    DoStmtBitfields DoStmtBits;
    ForStmtBitfields ForStmtBits;
    GotoStmtBitfields GotoStmtBits;
    ContinueStmtBitfields ContinueStmtBits;
    BreakStmtBitfields BreakStmtBits;
    ReturnStmtBitfields ReturnStmtBits;
    SwitchCaseBitfields SwitchCaseBits;

    // Expressions
    ExprBitfields ExprBits;
    PredefinedExprBitfields PredefinedExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    StringLiteralBitfields StringLiteralBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    UnaryOperatorBitfields UnaryOperatorBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    ArraySubscriptExprBitfields ArraySubscriptExprBits;
    CallExprBitfields CallExprBits;
    MemberExprBitfields MemberExprBits;
    CastExprBitfields CastExprBits;
    BinaryOperatorBitfields BinaryOperatorBits;
    InitListExprBitfields InitListExprBits;
    ParenListExprBitfields ParenListExprBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;

    // C++ Expressions
    CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
    CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
    CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
    CXXThisExprBitfields CXXThisExprBits;
    CXXThrowExprBitfields CXXThrowExprBits;
    CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
    CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
    CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
    CXXNewExprBitfields CXXNewExprBits;
    CXXDeleteExprBitfields CXXDeleteExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
    CXXConstructExprBitfields CXXConstructExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
    CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
    OverloadExprBitfields OverloadExprBits;
    UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
    UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
    CXXNoexceptExprBitfields CXXNoexceptExprBits;
    SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;

    // C++ Coroutines TS expressions
    CoawaitExprBitfields CoawaitBits;

    BoundsExprBitfields BoundsExprBits;
    InteropTypeExprBitfields InteropTypeExprBits;

    // Obj-C Expressions
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

    // Clang Extensions
    OpaqueValueExprBitfields OpaqueValueExprBits;
  };

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  // Placement new is permitted as-is.
  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  // Matching placement/usual deletes are no-ops: Stmt memory is owned by the
  // ASTContext allocator.
  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

protected:
  /// Iterator for iterating over Stmt * arrays that contain only Expr *
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
struct ExprIterator
      : llvm::iterator_adaptor_base<ExprIterator, Stmt **,
                                    std::random_access_iterator_tag, Expr *> {
    ExprIterator() : iterator_adaptor_base(nullptr) {}
    ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      // Sanity check: the stored Stmt* must actually be an Expr.
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<Expr **>(I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only Expr *
  struct ConstExprIterator
      : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
                                    std::random_access_iterator_tag,
                                    const Expr *const> {
    ConstExprIterator() : iterator_adaptor_base(nullptr) {}
    ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<const Expr *const *>(I);
    }
  };

private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    // Guard the hand-packed bitfield layout above: Stmt itself must stay one
    // word and pointer-aligned.
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();
  const Stmt *IgnoreImplicit() const {
    return const_cast<Stmt *>(this)->IgnoreImplicit();
  }

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // A null statement begins and ends at its semicolon.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

// The kind of Checked C checking to do in a scope.
enum class CheckedScopeKind {
  // No checking.
  Unchecked = 0x1,
  /// Check properties for bounds safety.
Bounds = 0x2,
  /// Check properties for bounds safety and preventing type confusion.
  BoundsAndTypes = 0x4
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  // Checked C extension state for this scope:
  // Written checked scope specifier.
  unsigned WrittenCSS : 2;
  // Inferred checked scope specifier, using information from parent
  // scope also.
  unsigned CSS : 2;
  // Checked scope keyword (_Checked / _Unchecked) location.
  SourceLocation CSSLoc;
  // Checked scope modifier (_Bounds_only) location.
  SourceLocation CSMLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB,
               CheckedScopeSpecifier WrittenCSS = CSS_None,
               CheckedScopeSpecifier CSS = CSS_Unchecked,
               SourceLocation CSSLoc = SourceLocation(),
               SourceLocation CSMLoc = SourceLocation());

  explicit CompoundStmt(EmptyShell Empty)
      : Stmt(CompoundStmtClass, Empty), WrittenCSS(CSS_None),
        CSS(CSS_Unchecked), CSSLoc(), CSMLoc() {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt*> Stmts,
                              SourceLocation LB, SourceLocation RB,
                              CheckedScopeSpecifier WrittenCSS = CSS_None,
                              CheckedScopeSpecifier CSS = CSS_Unchecked,
                              SourceLocation CSSLoc = SourceLocation(),
                              SourceLocation CSMLoc = SourceLocation());

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc), WrittenCSS(CSS_None),
        CSS(CSS_Unchecked), CSSLoc(Loc), CSMLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  CheckedScopeSpecifier getWrittenCheckedSpecifier() const {
    return (CheckedScopeSpecifier) WrittenCSS;
  }

  CheckedScopeSpecifier getCheckedSpecifier() const {
    return (CheckedScopeSpecifier) CSS;
  }

  void setWrittenCheckedSpecifiers(CheckedScopeSpecifier NS) {
    WrittenCSS = NS;
  }

  void setCheckedSpecifiers(CheckedScopeSpecifier NS) { CSS = NS; }

  bool isCheckedScope() const { return CSS != CSS_Unchecked; }

  // Body statements live in the trailing object storage.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    body_begin()[size() - 1] = S;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  SourceLocation getCheckedSpecifierLoc() const { return CSSLoc; }
  SourceLocation getSpecifierModifierLoc() const { return CSMLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
// NOTE(review): this class continues beyond the end of this chunk; the
// visible head is reproduced unchanged.
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), ColonLoc(ColonLoc) { setKeywordLoc(KWLoc); } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; } void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } inline Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase *>(this)->getSubStmt(); } SourceLocation getBeginLoc() const { return getKeywordLoc(); } inline SourceLocation getEndLoc() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; /// CaseStmt - Represent a case statement. It can optionally be a GNU case /// statement of the form LHS ... RHS representing a range of cases. class CaseStmt final : public SwitchCase, private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> { friend TrailingObjects; // CaseStmt is followed by several trailing objects, some of which optional. // Note that it would be more convenient to put the optional trailing objects // at the end but this would impact children(). // The trailing objects are in order: // // * A "Stmt *" for the LHS of the case statement. Always present. // // * A "Stmt *" for the RHS of the case statement. This is a GNU extension // which allow ranges in cases statement of the form LHS ... RHS. // Present if and only if caseStmtIsGNURange() is true. // // * A "Stmt *" for the substatement of the case statement. Always present. // // * A SourceLocation for the location of the ... if this is a case statement // with a range. 
Present if and only if caseStmtIsGNURange() is true. enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + caseStmtIsGNURange(); } unsigned numTrailingObjects(OverloadToken<SourceLocation>) const { return caseStmtIsGNURange(); } unsigned lhsOffset() const { return LhsOffset; } unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); } unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; } /// Build a case statement assuming that the storage for the /// trailing objects has been properly allocated. CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { // Handle GNU case statements of the form LHS ... RHS. bool IsGNURange = rhs != nullptr; SwitchCaseBits.CaseStmtIsGNURange = IsGNURange; setLHS(lhs); setSubStmt(nullptr); if (IsGNURange) { setRHS(rhs); setEllipsisLoc(ellipsisLoc); } } /// Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange) : SwitchCase(CaseStmtClass, Empty) { SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange; } public: /// Build a case statement. static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc); /// Build an empty case statement. static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange); /// True if this case statement is of the form case LHS ... RHS, which /// is a GNU extension. In this case the RHS can be obtained with getRHS() /// and the location of the ellipsis can be obtained with getEllipsisLoc(). bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; } SourceLocation getCaseLoc() const { return getKeywordLoc(); } void setCaseLoc(SourceLocation L) { setKeywordLoc(L); } /// Get the location of the ... 
in a case statement of the form LHS ... RHS. SourceLocation getEllipsisLoc() const { return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>() : SourceLocation(); } /// Set the location of the ... in a case statement of the form LHS ... RHS. /// Assert that this case statement is of this form. void setEllipsisLoc(SourceLocation L) { assert( caseStmtIsGNURange() && "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!"); *getTrailingObjects<SourceLocation>() = L; } Expr *getLHS() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } const Expr *getLHS() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } void setLHS(Expr *Val) { getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val); } Expr *getRHS() { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } const Expr *getRHS() const { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } void setRHS(Expr *Val) { assert(caseStmtIsGNURange() && "setRHS but this is not a case stmt of the form LHS ... RHS!"); getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val); } Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } const Stmt *getSubStmt() const { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } void setSubStmt(Stmt *S) { getTrailingObjects<Stmt *>()[subStmtOffset()] = S; } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. 
const CaseStmt *CS = this; while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; class DefaultStmt : public SwitchCase { Stmt *SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// Build an empty default statement. explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) {} Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return getKeywordLoc(); } void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt + 1); } }; SourceLocation SwitchCase::getEndLoc() const { if (const auto *CS = dyn_cast<CaseStmt>(this)) return CS->getEndLoc(); else if (const auto *DS = dyn_cast<DefaultStmt>(this)) return DS->getEndLoc(); llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } Stmt *SwitchCase::getSubStmt() { if (auto *CS = dyn_cast<CaseStmt>(this)) return CS->getSubStmt(); else if (auto *DS = dyn_cast<DefaultStmt>(this)) return DS->getSubStmt(); llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; class LabelStmt : public Stmt { LabelDecl *TheDecl; Stmt *SubStmt; public: /// Build a label statement. 
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) { setIdentLoc(IL); } /// Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {} SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; } void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getBeginLoc() const { return getIdentLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... 
} class AttributedStmt final : public Stmt, private llvm::TrailingObjects<AttributedStmt, const Attr *> { friend class ASTStmtReader; friend TrailingObjects; Stmt *SubStmt; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt) : Stmt(AttributedStmtClass), SubStmt(SubStmt) { AttributedStmtBits.NumAttrs = Attrs.size(); AttributedStmtBits.AttrLoc = Loc; std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : Stmt(AttributedStmtClass, Empty) { AttributedStmtBits.NumAttrs = NumAttrs; AttributedStmtBits.AttrLoc = SourceLocation{}; std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return getTrailingObjects<const Attr *>(); } const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); // Build an empty attributed statement. static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; } ArrayRef<const Attr *> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getBeginLoc() const { return getAttrLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. class IfStmt final : public Stmt, private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> { friend TrailingObjects; // IfStmt is followed by several trailing objects, some of which optional. 
// Note that it would be more convenient to put the optional trailing // objects at then end but this would change the order of the children. // The trailing objects are in order: // // * A "Stmt *" for the init statement. // Present if and only if hasInitStorage(). // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact a "Expr *". // // * A "Stmt *" for the then statement. // Always present. // // * A "Stmt *" for the else statement. // Present if and only if hasElseStorage(). // // * A "SourceLocation" for the location of the "else". // Present if and only if hasElseStorage(). enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() + hasInitStorage(); } unsigned numTrailingObjects(OverloadToken<SourceLocation>) const { return hasElseStorage(); } unsigned initOffset() const { return InitOffset; } unsigned varOffset() const { return InitOffset + hasInitStorage(); } unsigned condOffset() const { return InitOffset + hasInitStorage() + hasVarStorage(); } unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; } unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; } /// Build an if/then/else statement. IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else); /// Build an empty if/then/else statement. explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit); public: /// Create an IfStmt. 
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL = SourceLocation(), Stmt *Else = nullptr); /// Create an empty IfStmt optionally with storage for an else statement, /// condition variable and init expression. static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar, bool HasInit); /// True if this IfStmt has the storage for an init statement. bool hasInitStorage() const { return IfStmtBits.HasInit; } /// True if this IfStmt has storage for a variable declaration. bool hasVarStorage() const { return IfStmtBits.HasVar; } /// True if this IfStmt has storage for an else statement. bool hasElseStorage() const { return IfStmtBits.HasElse; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; } const Stmt *getThen() const { return getTrailingObjects<Stmt *>()[thenOffset()]; } void setThen(Stmt *Then) { getTrailingObjects<Stmt *>()[thenOffset()] = Then; } Stmt *getElse() { return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()] : nullptr; } const Stmt *getElse() const { return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()] : nullptr; } void setElse(Stmt *Else) { assert(hasElseStorage() && "This if statement has no storage for an else statement!"); getTrailingObjects<Stmt *>()[elseOffset()] = Else; } /// Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. 
/// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<IfStmt *>(this)->getConditionVariable(); } /// Set the condition variable for this if statement. /// The if statement must have storage for the condition variable. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This if statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; } void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; } SourceLocation getElseLoc() const { return hasElseStorage() ? 
*getTrailingObjects<SourceLocation>() : SourceLocation(); } void setElseLoc(SourceLocation ElseLoc) { assert(hasElseStorage() && "This if statement has no storage for an else statement!"); *getTrailingObjects<SourceLocation>() = ElseLoc; } bool isConstexpr() const { return IfStmtBits.IsConstexpr; } void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; } bool isObjCAvailabilityCheck() const; SourceLocation getBeginLoc() const { return getIfLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { if (getElse()) return getElse()->getEndLoc(); return getThen()->getEndLoc(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. class SwitchStmt final : public Stmt, private llvm::TrailingObjects<SwitchStmt, Stmt *> { friend TrailingObjects; /// Points to a linked list of case and default statements. SwitchCase *FirstCase; // SwitchStmt is followed by several trailing objects, // some of which optional. Note that it would be more convenient to // put the optional trailing objects at the end but this would change // the order in children(). // The trailing objects are in order: // // * A "Stmt *" for the init statement. // Present if and only if hasInitStorage(). // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact an "Expr *". // // * A "Stmt *" for the body. // Always present. 
enum { InitOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage(); } unsigned initOffset() const { return InitOffset; } unsigned varOffset() const { return InitOffset + hasInitStorage(); } unsigned condOffset() const { return InitOffset + hasInitStorage() + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } /// Build a switch statement. SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Build a empty switch statement. explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar); public: /// Create a switch statement. static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Create an empty switch statement optionally with storage for /// an init expression and a condition variable. static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit, bool HasVar); /// True if this SwitchStmt has storage for an init statement. bool hasInitStorage() const { return SwitchStmtBits.HasInit; } /// True if this SwitchStmt has storage for a condition variable. bool hasVarStorage() const { return SwitchStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? 
getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This switch statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } /// Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<SwitchStmt *>(this)->getConditionVariable(); } /// Set the condition variable in this switch statement. /// The switch statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *VD); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SwitchCase *getSwitchCaseList() { return FirstCase; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { setBody(S); setSwitchLoc(SL); } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. 
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return SwitchStmtBits.AllEnumCasesCovered; } SourceLocation getBeginLoc() const { return getSwitchLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody() ? getBody()->getEndLoc() : reinterpret_cast<const Stmt *>(getCond())->getEndLoc(); } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. class WhileStmt final : public Stmt, private llvm::TrailingObjects<WhileStmt, Stmt *> { friend TrailingObjects; // WhileStmt is followed by several trailing objects, // some of which optional. Note that it would be more // convenient to put the optional trailing object at the end // but this would affect children(). // The trailing objects are in order: // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact an "Expr *". // // * A "Stmt *" for the body. // Always present. // enum { VarOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned varOffset() const { return VarOffset; } unsigned condOffset() const { return VarOffset + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasVarStorage(); } /// Build a while statement. WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Build an empty while statement. 
explicit WhileStmt(EmptyShell Empty, bool HasVar); public: /// Create a while statement. static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Create an empty while statement optionally with storage for /// a condition variable. static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar); /// True if this WhileStmt has storage for a condition variable. bool hasVarStorage() const { return WhileStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } /// Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<WhileStmt *>(this)->getConditionVariable(); } /// Set the condition variable of this while statement. /// The while statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? 
static_cast<DeclStmt *>(
                 getTrailingObjects<Stmt *>()[varOffset()])
           : nullptr;
  }

  // Location of the 'while' keyword (stored in the Stmt bitfields).
  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  // Used by isa<>/dyn_cast<> to identify this statement kind.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Indices into SubExprs; END_EXPR doubles as the array length.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // Condition and body accessors; the condition is stored as a Stmt*
  // and cast back to Expr* on retrieval.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // Source locations of the 'do' keyword, 'while' keyword, and final ')'.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Indices into SubExprs; END_EXPR doubles as the array length.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // Source locations of the 'for' keyword and the parentheses.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // Source locations of the 'goto' keyword and the target label.
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators (goto has no sub-statements, so the range is empty).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
                   Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  // The target expression is stored as a Stmt* and cast on retrieval.
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return Target->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators (single child: the target expression).
  child_range children() { return child_range(&Target, &Target + 1); }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators (no children).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators (no children).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The return value is stored as a Stmt* and cast back on retrieval.
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators (one child if there is a return expression, else none).
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions, laid out as outputs followed by inputs
  // (see the iterator accessors below).
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // NOTE(review): both locations return an invalid (default) SourceLocation
  // here; derived classes provide the real locations.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand. All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint. Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators. Inputs occupy Exprs[NumOutputs .. NumOutputs+NumInputs).
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; }
  inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; }
  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; }
  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators. Outputs occupy Exprs[0 .. NumOutputs).
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() { return &Exprs[0]; }
  outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const { return &Exprs[0]; }
  const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; }
  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};

/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Constraints and Names are indexed with outputs first, then inputs
  // (see the [i + NumOutputs] accessors below).
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below). An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    // Only valid for Operand pieces (asserted).
    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present. This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces. If the asm string is erroneous, emit errors and return
  /// true, otherwise return false. This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  //// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  // Returns an empty StringRef when the output operand has no name.
  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input entries follow the outputs in Names/Constraints.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  // Returns an empty StringRef when the input operand has no name.
  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};

/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  StringRef AsmStr;

  unsigned NumAsmToks = 0;

  Token *AsmToks = nullptr;
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  // Braces are optional in MS inline asm; valid LBraceLoc implies they exist.
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//

  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input constraints follow the outputs in the Constraints array.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//

  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};

/// Represents a __except block of a Windows SEH __try statement.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  // Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the body.
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents a __finally block of a Windows SEH __try statement.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a Windows SEH (or C++) try statement with its handler.
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  // Children[TRY] is the try block, Children[HANDLER] the handler.
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators (no children).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage immediately follows the object: NumCaptures capture-init
  // expressions, then the captured statement itself at index NumCaptures.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // Source range is delegated entirely to the captured statement.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();
};

} // namespace clang

#endif // LLVM_CLANG_AST_STMT_H
GB_unaryop__ainv_uint32_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint32_bool
// op(A') function:  GB_tran__ainv_uint32_bool

// C type:   uint32_t
// A type:   bool
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse.  Negation of an unsigned value is
// well-defined (wraps mod 2^32): a bool input of 0 maps to 0, and 1 maps
// to 0xFFFFFFFF.
#define GB_OP(z, x) \
    z = -x ;

// casting: typecast the bool entry to uint32_t before applying the operator
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = -(uint32_t) aij elementwise over anz entries, parallelized
// with a static OpenMP schedule.  Cx and Ax may be aliased (each iteration
// reads and writes only position p, so aliasing is safe).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.

GrB_Info GB_unop__ainv_uint32_bool
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by including GB_unaryop_transpose.c,
// which expands the GB_* macros defined above (phase 2 of 2).

GrB_Info GB_tran__ainv_uint32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
stencil.h
#include "multicore.h"

// Linearized index macros for per-core array sections.  They assume a
// variable named "core" (a Core<T>*) is in scope at the point of use, and
// index the CURRENT core's section using its own local sizes.
#define index1D(i) (i)
#define index2D(i,j) (((j)*(core->coreArrayNeighborhoodSizes_2D[1][1][0]))+(i))

// I think this should be in terms of the size for X and Y, not X, Y, and Z!
// #define index3D(i,j,k) (((k)*core->coreArrayNeighborhoodSizes_3D[1][1][1][2]*core->coreArrayNeighborhoodSizes_3D[1][1][1][1])+((j)*core->coreArrayNeighborhoodSizes_3D[1][1][1][0])+(i))
#define index3D(i,j,k) (((k)*(core->coreArrayNeighborhoodSizes_3D[1][1][1][0])*(core->coreArrayNeighborhoodSizes_3D[1][1][1][1]))+((j)*(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]))+(i))

// We need another macro for the general case where the memory segment is a
// different size than coreArrayNeighborhoodSizes_2D[1][1][0] in the X (and Y)
// axis, since the length of the data in each axis can be different along the
// same axis of the core array.  These macros take the length of the array in
// the required axis to properly reference the element on the associated
// "other core" (a neighboring core's section).
#define otherCore_index2D(i,j,sizeX) (((j)*sizeX)+(i))
#define otherCore_index3D(i,j,k,sizeX,sizeY) (((k)*sizeX*sizeY)+((j)*sizeX)+(i))

// 2D Jacobi relaxation over a MulticoreArray: one OpenMP thread per core
// section, 4-point average of old_array written into array, then the
// per-core internal-boundary update.
template <typename T>
void relax2D( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
{
    // This is a working example of the relaxation associated with a stencil on
    // the array abstraction mapped to the separate multi-dimensional memories
    // allocated per core and onto a multi-dimensional array of cores (core array).
    int numberOfCores = array.get_numberOfCores();

    // Macro to support linearization of the 2D array index computation; it
    // captures the local variable sizeX declared inside the loop below.
#define local_index2D(i,j) (((j)*sizeX)+(i))

    // Use OpenMP to support the threading...
#pragma omp parallel for
    for (int core = 0; core < numberOfCores; core++)
    {
        // This lifts out loop invariant portions of the code.
        T* arraySection = array.get_arraySectionPointers()[core];
        T* old_arraySection = old_array.get_arraySectionPointers()[core];

        // Lift out loop invariant local array size values.
        int sizeX = array.get_coreArray()[core]->coreArrayNeighborhoodSizes_2D[1][1][0];
        int sizeY = array.get_coreArray()[core]->coreArrayNeighborhoodSizes_2D[1][1][1];

        for (int j = 1; j < sizeY-1; j++)
        {
            for (int i = 1; i < sizeX-1; i++)
            {
                // This is the dominant computation for each array section per
                // core: 4-point Jacobi average over this section's interior.
                // The compiler will use the user's code to derive the code here.
                arraySection[local_index2D(i,j)] = (old_arraySection[local_index2D(i-1,j)] + old_arraySection[local_index2D(i+1,j)] + old_arraySection[local_index2D(i,j-1)] + old_arraySection[local_index2D(i,j+1)]) / 4.0;
            }
        }

        // We could alternatively generate the call for relaxation for the internal
        // boundaries in the same loop (reduces synchronization).
        array.get_coreArray()[core]->relax_on_boundary(core,array,old_array);
    }

    // Relax on the edges of the array sections on each core (alternative approach).
    // relax2D_on_boundary(array,old_array);

    // undefine the local 2D index support macro
#undef local_index2D
}

// Applies only the internal-boundary (inter-core edge) relaxation step for
// every core; the interior update is assumed to have been done elsewhere.
template <typename T>
void relax2D_on_boundary( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
{
    // This function supports the relaxation operator on the internal boundaries
    // of the different arrays allocated on a per core basis.  We take advantage
    // of shared memory to support the stencil operations.
    int numberOfCores = array.get_numberOfCores();

#pragma omp parallel for
    for (int core = 0; core < numberOfCores; core++)
    {
        array.get_coreArray()[core]->relax_on_boundary(core,array,old_array);
    }
}

// 3D Jacobi relaxation using the "mid level" interface: per-core element
// access goes through getCore(...)(i,j,k) rather than raw section pointers.
template <typename T>
void relax3D_midlevel( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
{
    // This is a working example of the relaxation associated with a stencil on
    // the array abstraction mapped to the separate multi-dimensional memories
    // allocated per core and onto a multi-dimensional array of cores (core array).
    // Note: As an alternative to the specialized side handling for internal
    // boundary updates, consider a loop over all of the array while skipping the
    // interior regions for better performance.  This could use the general
    // technique demonstrated above for general internal core edge updates
    // without any significant loss in performance (maybe).  This might permit
    // more general internal application of the stencil operator to the edges of
    // array sections on each core.  Such code might be more easily generated
    // than the more complex form of edge code in the much larger functions (below).

    int numberOfCores_X = array.get_coreArraySize(0);
    int numberOfCores_Y = array.get_coreArraySize(1);
    int numberOfCores_Z = array.get_coreArraySize(2);

    // Use OpenMP to support the threading (outer core-array axis only)...
#pragma omp parallel for
    for (int core_X = 0; core_X < numberOfCores_X; core_X++)
    {
        //#pragma omp for
        for (int core_Y = 0; core_Y < numberOfCores_Y; core_Y++)
        {
            for (int core_Z = 0; core_Z < numberOfCores_Z; core_Z++)
            {
                // This lifts out loop invariant portions of the code.
                Core<T> & coreMemory = array.getCore(core_X,core_Y,core_Z);

                // Lift out loop invariant local array size values.
                int sizeX = array.getCore(core_X,core_Y,core_Z).coreArrayNeighborhoodSizes_3D[1][1][1][0];
                int sizeY = array.getCore(core_X,core_Y,core_Z).coreArrayNeighborhoodSizes_3D[1][1][1][1];
                int sizeZ = array.getCore(core_X,core_Y,core_Z).coreArrayNeighborhoodSizes_3D[1][1][1][2];

#if 0
                printf ("\nsizeX = %d sizeY = %d sizeZ = %d \n",sizeX,sizeY,sizeZ);
#endif

                // On a physical boundary of the whole array, skip the outermost
                // plane of this core's section (the stencil would otherwise
                // read outside the global array).
                int base_X = (coreMemory.boundaryCore_3D[0][0] == true) ? 1 : 0;
                int bound_X = (coreMemory.boundaryCore_3D[0][1] == true) ? sizeX - 2: sizeX - 1;
                int base_Y = (coreMemory.boundaryCore_3D[1][0] == true) ? 1 : 0;
                int bound_Y = (coreMemory.boundaryCore_3D[1][1] == true) ? sizeY - 2: sizeY - 1;
                int base_Z = (coreMemory.boundaryCore_3D[2][0] == true) ? 1 : 0;
                int bound_Z = (coreMemory.boundaryCore_3D[2][1] == true) ? sizeZ - 2: sizeZ - 1;

#if 0
                printf ("core_X = %d core_Y = %d core_Z = %d base_X = %d bound_X = %d base_Y = %d bound_Y = %d base_Z = %d bound_Z = %d\n",core_X,core_Y,core_Z,base_X,bound_X,base_Y,bound_Y,base_Z, bound_Z);
#endif

                for (int k = base_Z; k <= bound_Z; k++)
                {
                    for (int j = base_Y; j <= bound_Y; j++)
                    {
                        for (int i = base_X; i <= bound_X; i++)
                        {
#if 0
                            // NOTE(review): the last two debug prints below index
                            // (i,j-1,k-1) and (i,j+1,k+1) but are labeled
                            // (i,j,k-1)/(i,j,k+1) — labels and indices disagree
                            // (disabled code; the live update below is correct).
                            printf ("\ncore_X = %d core_Y = %d i = %d j = %d \n",core_X,core_Y,i,j);
                            printf ("    array.getCore(core_X,core_Y,core_Z)(i,j,k) = %f \n",array.getCore(core_X,core_Y,core_Z)(i,j,k));
                            printf ("old_array.getCore(core_X,core_Y,core_Z)(i-1,j,k) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i-1,j,k));
                            printf ("old_array.getCore(core_X,core_Y,core_Z)(i+1,j,k) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i+1,j,k));
                            printf ("old_array.getCore(core_X,core_Y,core_Z)(i,j-1,k) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i,j-1,k));
                            printf ("old_array.getCore(core_X,core_Y,core_Z)(i,j+1,k) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i,j+1,k));
                            printf ("old_array.getCore(core_X,core_Y,core_Z)(i,j,k-1) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i,j-1,k-1));
                            printf ("old_array.getCore(core_X,core_Y,core_Z)(i,j,k+1) = %f \n",old_array.getCore(core_X,core_Y,core_Z)(i,j+1,k+1));
#endif
                            // 6-point Jacobi average over the section interior.
                            array.getCore(core_X,core_Y,core_Z)(i,j,k) = ( old_array.getCore(core_X,core_Y,core_Z)(i-1,j,k) + old_array.getCore(core_X,core_Y,core_Z)(i+1,j,k) + old_array.getCore(core_X,core_Y,core_Z)(i,j-1,k) + old_array.getCore(core_X,core_Y,core_Z)(i,j+1,k) + old_array.getCore(core_X,core_Y,core_Z)(i,j,k-1) + old_array.getCore(core_X,core_Y,core_Z)(i,j,k+1)) / 6.0;
                        }
                    }
                }
            }
        }
    }
}

// 2D Jacobi relaxation using the "mid level" interface (element access via
// getCore(core_X,core_Y,0)(i,j,0)); 2D arrays use a single core plane Z==0.
template <typename T>
void relax2D_midlevel( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
{
    // This is a working example of the relaxation associated with a stencil on
    // the array abstraction mapped to the separate multi-dimensional memories
    // allocated per core and onto a multi-dimensional array of cores (core array).
    // Note: As an alternative to the specialized side handling for internal
    // boundary updates, consider a loop over all of the array while skipping the
    // interior regions for better performance.  This could use the general
    // technique demonstrated above for general internal core edge updates
    // without any significant loss in performance (maybe).  This might permit
    // more general internal application of the stencil operator to the edges of
    // array sections on each core.  Such code might be more easily generated
    // than the more complex form of edge code in the much larger functions (below).

    int numberOfCores_X = array.get_coreArraySize(0);
    int numberOfCores_Y = array.get_coreArraySize(1);

    // Use OpenMP to support the threading (outer core-array axis only)...
#pragma omp parallel for
    for (int core_X = 0; core_X < numberOfCores_X; core_X++)
    {
        //#pragma omp for
        for (int core_Y = 0; core_Y < numberOfCores_Y; core_Y++)
        {
            // This lifts out loop invariant portions of the code.
            Core<T> & coreMemory = array.getCore(core_X,core_Y,0);

            // Lift out loop invariant local array size values.
            int sizeX = array.getCore(core_X,core_Y,0).coreArrayNeighborhoodSizes_2D[1][1][0];
            int sizeY = array.getCore(core_X,core_Y,0).coreArrayNeighborhoodSizes_2D[1][1][1];

#if 0
            printf ("\nsizeX = %d sizeY = %d \n",sizeX,sizeY);
#endif

            // On a physical boundary of the whole array, skip the outermost
            // row/column of this core's section.
            int base_X = (coreMemory.boundaryCore_2D[0][0] == true) ? 1 : 0;
            int bound_X = (coreMemory.boundaryCore_2D[0][1] == true) ? sizeX - 2: sizeX - 1;
            int base_Y = (coreMemory.boundaryCore_2D[1][0] == true) ? 1 : 0;
            int bound_Y = (coreMemory.boundaryCore_2D[1][1] == true) ? sizeY - 2: sizeY - 1;

#if 0
            printf ("core_X = %d core_Y = %d base_X = %d bound_X = %d base_Y = %d bound_Y = %d \n",core_X,core_Y,base_X,bound_X,base_Y,bound_Y);
#endif

            for (int j = base_Y; j <= bound_Y; j++)
            {
                for (int i = base_X; i <= bound_X; i++)
                {
#if 0
                    printf ("\ncore_X = %d core_Y = %d i = %d j = %d \n",core_X,core_Y,i,j);
                    printf ("array.getCore(core_X,core_Y,0)(i,j,0) = %f \n",array.getCore(core_X,core_Y,0)(i,j,0));
                    printf ("old_array.getCore(core_X,core_Y,0)(i-1,j,0) = %f \n",old_array.getCore(core_X,core_Y,0)(i-1,j,0));
                    printf ("old_array.getCore(core_X,core_Y,0)(i+1,j,0) = %f \n",old_array.getCore(core_X,core_Y,0)(i+1,j,0));
                    printf ("old_array.getCore(core_X,core_Y,0)(i,j-1,0) = %f \n",old_array.getCore(core_X,core_Y,0)(i,j-1,0));
                    printf ("old_array.getCore(core_X,core_Y,0)(i,j+1,0) = %f \n",old_array.getCore(core_X,core_Y,0)(i,j+1,0));
#endif
                    // 4-point Jacobi average over the section interior.
                    array.getCore(core_X,core_Y,0)(i,j,0) = ( old_array.getCore(core_X,core_Y,0)(i-1,j,0) + old_array.getCore(core_X,core_Y,0)(i+1,j,0) + old_array.getCore(core_X,core_Y,0)(i,j-1,0) + old_array.getCore(core_X,core_Y,0)(i,j+1,0) ) / 4.0;
                }
            }
        }
    }
}

// 3D Jacobi relaxation through the high-level global-index operator();
// simple and slow — intended for debugging/reference only.
template <typename T>
void relax3D_highlevel( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
{
    // This is a working example of a 3D stencil demonstrating a high level
    // interface suitable only as debugging support.
#pragma omp parallel for
    for (int k = 1; k < array.get_arraySize(2)-1; k++)
    {
        for (int j = 1; j < array.get_arraySize(1)-1; j++)
        {
            for (int i = 1; i < array.get_arraySize(0)-1; i++)
            {
                array(i,j,k) = ( old_array(i-1,j,k) + old_array(i+1,j,k) + old_array(i,j-1,k) + old_array(i,j+1,k) + old_array(i,j,k-1) + old_array(i,j,k+1)) / 6.0;
            }
        }
    }
}

// 2D Jacobi relaxation through the high-level global-index operator();
// intended for debugging/reference only.
template <typename T>
void relax2D_highlevel( MulticoreArray<T> & array, MulticoreArray<T> & old_array )
{
    // This is a working example of a 2D stencil demonstrating a high level
    // interface suitable only as debugging support.
    //#pragma omp parallel for
    // for (int k = 1; k < array.get_arraySize(2)-1; k++)
    //    {
#pragma omp parallel for
    for (int j = 1; j < array.get_arraySize(1)-1; j++)
    {
        for (int i = 1; i < array.get_arraySize(0)-1; i++)
        {
            // 4-point Jacobi average using global indices (Z plane fixed at 0).
            array(i,j,0) = ( old_array(i-1,j,0) + old_array(i+1,j,0) + old_array(i,j-1,0) + old_array(i,j+1,0)) / 4.0;
        }
    }
    // }
}

// **********************************************************************

// Interior relaxation for a SINGLE core's array section, with stencil
// radius "dist".  Dispatches on the global array rank (3D / 2D / 1D based
// on which global sizes exceed 2) and averages the 2*rank neighbors at
// offsets 1..dist, writing old_array -> array.  The inter-core boundary
// cells are handled separately by relax_on_boundary().
template <typename T>
void relax( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist)
{
    // assert(multicoreArray != NULL);
    const int arraySizeX = array.get_arraySize(0);
    const int arraySizeY = array.get_arraySize(1);
    const int arraySizeZ = array.get_arraySize(2);

    int p = coreID;
    Core<T>* core = array.coreArray[coreID];

    // This lifts out loop invariant portions of the code.
    T* arraySection = array.get_arraySectionPointers()[p];
    T* old_arraySection = old_array.get_arraySectionPointers()[p];

#if 0
    array.display("before relaxation on interior");
#endif

    // NOTE(review): these three asserts compare a value with itself and are
    // always true — presumably they were meant to compare array against
    // old_array core-array sizes; confirm intent.
    assert(array.get_coreArraySize(0) == array.get_coreArraySize(0));
    assert(array.get_coreArraySize(1) == array.get_coreArraySize(1));
    assert(array.get_coreArraySize(2) == array.get_coreArraySize(2));

#if 0
    printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection);
#endif

    assert(arraySection != NULL);
    assert(old_arraySection != NULL);

#if 0
    printf ("array.get_tableBasedDistribution() = %s \n",array.get_tableBasedDistribution() ? "true" : "false");
#endif

#if 1
    if (arraySizeZ > 2)
    {
        if (arraySizeY > 2 && arraySizeX > 2)
        {
            // This is the case of 3D relaxation
#if 0
            printf ("This is the case of 3D relaxation \n");
            // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core).
            printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! \n");
#endif
            // This is required to avoid valgrind reported errors on some blocks
            // where the local (sectionSize[dim]) is zero.  This is likely because
            // of overflow from size_t type variables.
            assert(core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 0);
            assert(core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 0);
            assert(core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 0);

            // Loop bounds over this section's interior: start "dist" in from
            // each face, plus the halo width when this core sits on a physical
            // boundary of the whole array and a halo is attached.
            int LBX = (array.hasAttachedHalo() && core->boundaryCore_3D[0][0]) ? (dist+array.get_haloWidth(0)) : dist;
            int LBY = (array.hasAttachedHalo() && core->boundaryCore_3D[1][0]) ? (dist+array.get_haloWidth(1)) : dist;
            int LBZ = (array.hasAttachedHalo() && core->boundaryCore_3D[2][0]) ? (dist+array.get_haloWidth(2)) : dist;
            int UBX = (array.hasAttachedHalo() && core->boundaryCore_3D[0][1]) ? (core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist-array.get_haloWidth(0)) : (core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist);
            int UBY = (array.hasAttachedHalo() && core->boundaryCore_3D[1][1]) ? (core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist-array.get_haloWidth(1)) : (core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist);
            int UBZ = (array.hasAttachedHalo() && core->boundaryCore_3D[2][1]) ? (core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist-array.get_haloWidth(2)) : (core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist);

            // Only relax if the section actually has an interior of width > 0.
            if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > (2*dist) && core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > (2*dist) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > (2*dist))
            {
                for (int k = LBZ; k < UBZ; k++)
                {
                    for (int j = LBY; j < UBY; j++)
                    {
                        for (int i = LBX; i < UBX; i++)
                        {
                            // This is the dominant computation for each array section per core.
                            // The compiler will use the user's code to derive the code here.
#if 0
                            printf ("p= %d Indexing 3D array (i,j,k) = (%d,%d,%d) \n",p,i,j,k);
#endif
#if 0
                            arraySection[index3D(i,j,k)] = (old_arraySection[index3D(i-1,j,k)] + old_arraySection[index3D(i+1,j,k)] + old_arraySection[index3D(i,j-1,k)] + old_arraySection[index3D(i,j+1,k)] + old_arraySection[index3D(i,j,k-1)] + old_arraySection[index3D(i,j,k+1)]) / 6.0;
#else
                            // Average the 6 axis neighbors at each offset d = 1..dist.
                            T tmp(0.0);
                            for(int d=1; d <=dist; d++)
                            {
                                tmp += (old_arraySection[index3D(i-d,j,k)] + old_arraySection[index3D(i+d,j,k)] + old_arraySection[index3D(i,j-d,k)] + old_arraySection[index3D(i,j+d,k)] + old_arraySection[index3D(i,j,k-d)] + old_arraySection[index3D(i,j,k+d)]);
                            }
                            arraySection[index3D(i,j,k)] = tmp / (6.0*dist);
#endif
                        }
                    }
                }
            }
        }
        else
        {
#if 0
            printf ("3D array too small (still no interior) \n");
#endif
        }
    }
    else
    {
        if (arraySizeZ == 2)
        {
#if 0
            printf ("3D array (with size 2 in Z axis) too small (still no interior) \n");
#endif
        }
        else
        {
            if (arraySizeY > 2)
            {
                if (arraySizeX > 2)
                {
                    // This is the case of 2D relaxation
#if 0
                    printf ("This is the case of 2D relaxation (interior) p = %d \n",p);
                    printf ("core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1]);
#endif
                    int LBX = (array.hasAttachedHalo() && core->boundaryCore_2D[0][0]) ? (dist+array.get_haloWidth(0)) : dist;
                    int LBY = (array.hasAttachedHalo() && core->boundaryCore_2D[1][0]) ? (dist+array.get_haloWidth(1)) : dist;
                    int UBX = (array.hasAttachedHalo() && core->boundaryCore_2D[0][1]) ? (core->coreArrayNeighborhoodSizes_2D[1][1][0]-dist-array.get_haloWidth(0)) : (core->coreArrayNeighborhoodSizes_2D[1][1][0]-dist);
                    int UBY = (array.hasAttachedHalo() && core->boundaryCore_2D[1][1]) ? (core->coreArrayNeighborhoodSizes_2D[1][1][1]-dist-array.get_haloWidth(1)) : (core->coreArrayNeighborhoodSizes_2D[1][1][1]-dist);

                    // The core array may be higher dimensional than the array and
                    // if so then the local size along the Z axis may be zero.
                    // If so, then we don't want to process the local array section.
                    // if (sectionSize[2] == 1)
                    // if (sectionSize[0] > 2 && sectionSize[1] > 2 && sectionSize[2] == 1)
                    // if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 2 && core->coreArrayNeighborhoodSizes_2D[1][1][1] > 2)
                    // NOTE(review): the last conjunct reads element [2] of a _2D
                    // size record — verify that the 2D size arrays really carry a
                    // third (Z) component.
                    if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > (dist*2) && core->coreArrayNeighborhoodSizes_2D[1][1][1] > (dist*2) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1)
                    {
                        for (int j = LBY; j < UBY; j++)
                        {
                            for (int i = LBX; i < UBX; i++)
                            {
                                // This is the dominant computation for each array section per core.
#if 1
                                // NOTE(review): the accumulation uses fixed offsets
                                // i-1/i+1/j-1/j+1 instead of i-d/i+d/j-d/j+d, so for
                                // dist > 1 the same 1-offset stencil is summed dist
                                // times — inconsistent with the 3D case above;
                                // likely a bug (harmless only when dist == 1).
                                T tmp(0.0);
                                for(int d=1; d <=dist; d++)
                                {
                                    tmp += (old_arraySection[index2D(i-1,j)] + old_arraySection[index2D(i+1,j)] + old_arraySection[index2D(i,j-1)] + old_arraySection[index2D(i,j+1)]);
                                }
                                arraySection[index2D(i,j)] = tmp / (4.0 * dist);
#endif
                            }
                        }
                    }
                }
                else
                {
#if 0
                    printf ("2D array too small (still no interior) \n");
#endif
                }
            }
            else
            {
                if (arraySizeY == 2)
                {
#if 0
                    printf ("2D array (with size 2 in Y axis) too small (still no interior) \n");
#endif
                }
                else
                {
                    if (arraySizeX > 2)
                    {
                        // This is the case of 1D relaxation
#if 0
                        printf ("This is the case of 1D relaxation sectionSize[0] = %d \n",sectionSize[0]);
#endif
                        // The core array may be higher dimensional than the array
                        // and if so then the local size along either the Y or Z
                        // axis may be zero.  If so, then we don't want to process
                        // the local array section.
                        // if (sectionSize[1] == 1 && sectionSize[2] == 1)
                        // if (sectionSize[0] > 2 && sectionSize[1] == 1 && sectionSize[2] == 1)
                        // if (sectionSize[0] > 0 && ((sectionSize[1] == 1 && sectionSize[2] == 1) || array.get_tableBasedDistribution() == false))
                        if (core->coreArrayNeighborhoodSizes_1D[1][0] > 0 && (core->coreArrayNeighborhoodSizes_1D[1][1] == 1 && core->coreArrayNeighborhoodSizes_1D[1][2] == 1))
                        {
                            // NOTE(review): unlike the 2D/3D branches this loop
                            // ignores dist in its bounds (fixed 1..size-2).
                            for (int i = 1; i < core->coreArrayNeighborhoodSizes_1D[1][0]-1; i++)
                            {
                                // This is the dominant computation for each array section per core.
#if 0
                                printf ("i = %d old_arraySection[index1D(i-1)=%d] = %f \n",i,index1D(i-1),arraySection[index1D(i-1)]);
                                printf ("i = %d old_arraySection[index1D(i+1)=%d] = %f \n",i,index1D(i+1),arraySection[index1D(i+1)]);
#endif
#if 1
                                // NOTE(review): fixed offsets i-1/i+1 instead of
                                // i-d/i+d, and the sum is divided by 2.0 inside the
                                // loop AND by (2.0*dist) outside — for dist == 1 the
                                // result is (left+right)/4, not the (left+right)/2
                                // normalization the 2D (/4) and 3D (/6) branches
                                // use.  Likely a double-normalization bug; confirm
                                // intended 1D weighting before changing.
                                T tmp(0.0);
                                for(int d=1; d <=dist; d++)
                                {
                                    tmp += (old_arraySection[index1D(i-1)] + old_arraySection[index1D(i+1)]) / 2.0;
                                }
                                arraySection[index1D(i)] = tmp / (2.0*dist);
#endif
#if 0
                                printf ("arraySection[index1D(i=%d)=%d] = %f \n",i,index1D(i),arraySection[index1D(i)]);
#endif
                            }
                        }
                        else
                        {
#if 0
                            printf ("The local size for this arraySection is zero in either the Y or Z axis sectionSize[1] = %d sectionSize[2] = %d \n",sectionSize[1],sectionSize[2]);
#endif
                        }
                    }
                    else
                    {
                        // This array does not have an interior upon which to relax.
#if 0 printf ("1D array too small (still no interior) \n"); #endif } } } } } #endif #if 0 array.display("after relaxation on interior: array"); // old_array.display("after relaxation on interior: old_array"); #endif } template <typename T> void relax_on_boundary( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist ) { // assert(multicoreArray != NULL); const int arraySizeX = array.get_arraySize(0); const int arraySizeY = array.get_arraySize(1); const int arraySizeZ = array.get_arraySize(2); int p = coreID; Core<T>* core = array.coreArray[coreID]; // This lifts out loop invariant portions of the code. T** arraySectionPointers = array.get_arraySectionPointers(); T** old_arraySectionPointers = old_array.get_arraySectionPointers(); assert(arraySectionPointers != NULL); assert(old_arraySectionPointers != NULL); T* arraySection = array.get_arraySectionPointers()[p]; T* old_arraySection = old_array.get_arraySectionPointers()[p]; #if 0 printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection); #endif assert(arraySection != NULL); assert(old_arraySection != NULL); #if 1 // ************************************************************** // Fixup internal bounaries of the memory allocated to each core. // ************************************************************** #if 0 printf ("Fixup boundaries: p = %d Array size (%d,%d,%d) sectionSize(%d,%d,%d) coreArray(%d,%d,%d) \n",p,arraySizeX,arraySizeY,arraySizeZ,sectionSize[0],sectionSize[1],sectionSize[2],array.get_coreArraySize(0),array.get_coreArraySize(1),array.get_coreArraySize(2)); #endif if (arraySizeZ > (2*dist)) { if (arraySizeY > (2*dist) && arraySizeX > (2*dist)) { // This is the case of 3D relaxation #if 0 printf ("This is the case of 3D relaxation \n"); // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core). 
printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! \n"); #endif if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 1) { // *************************************** // Now process the edges along the X axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1)) { if (core->boundaryCore_3D[1][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's UPPER boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d - idx),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,(idx-d),k)] ) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,(idx+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idx,k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idx,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idx,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idx,k+d)]); } arraySection[index3D(i,idx,k)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[1][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's BOTTOM boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += (/* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idx-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k+d)]); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] = tmp / (6.0*dist); } } } } } } else { /**TODO: adding special case for X size or Z size is only 1**/ } // *************************************** // Now process the edges along the Y axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1)) { if (core->boundaryCore_3D[0][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's LEFT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idx,j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idx,j+d,k)] + /* array[Z][Y][X-1] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idx),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idx-d,j,k)])+ /* array[Z][Y][X+1] */ old_arraySection[index3D(idx+d,j,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idx,j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idx,j,k+d)]); } arraySection[index3D(idx,j,k)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[0][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's RIGHT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j+d,k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1+d),j,k)] + /* array[Z][Y][X+1] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idx-1),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1-d),j,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k+d)]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k)] = tmp / (6.0*dist); } } } } } } else { } // *************************************** // Now process the edges along the Z axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1)) { if (core->boundaryCore_3D[2][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's LEFT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,idx)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,idx)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,idx)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,idx)] + /* array[Z-1][Y][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idx),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,j,idx-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,j,idx+d)]); } arraySection[index3D(i,j,idx)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[2][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's RIGHT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1+d))] + /* array[Z+1][Y][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,j,(d-idx-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1-d))])); } arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] = tmp / (6.0*dist); } } } } } } else { } // ******************** // End of plane updates // ******************** // ******************** // Edge updates along X axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if ((core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,idxy-d,idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,idxy,idxz-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idxy,idxz+d)]); } arraySection[index3D(i,idxy,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,idxy-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ 
old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* 
array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // Edge updates along Y axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),j,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,j,(idxz+d))]); } arraySection[index3D(idxx,j,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),j,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,j,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,idxz)] + /* array[Z][Y][X-1] */ 
old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),j,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz+d))]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ 
old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),j,core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),j,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // Edge updates along Z axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 
0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,idxy-d,k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,idxy+d,k)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,k+d)]); } arraySection[index3D(idxx,idxy,k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ 
old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d > idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ ((d > idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* 
array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k+d)]) ; } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) 
{ tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // End of edge updates // ******************** // ******************** // corners updates // ******************** if ((core->boundaryCore_3D[0][0] == true) || 
(core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,idxy,(idxz-d))] ) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,(idxz+d))] ) ; } arraySection[index3D(idxx,idxy,idxz)] = tmp / (6.0*dist) ; } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* 
array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idxx-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D(idxx+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && 
(core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))]) + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[2][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz+d))] ); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))] ); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[2][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } // ******************** // End of corner updates // ******************** } else { #if 0 printf ("This array segment can't be processed for edge handling because it is too small in at least one axis: p = %d size = (%d,%d,%d) \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // assert(false); } #if 0 // This is required to avoid valgrind reported errors on some blocks where the local (sectionSize[dim]) is zero. // This is likely because of over flow from size_t type veraibles. if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 2 && core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 2 && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 2) { for (int k = 1; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-1; k++) { for (int j = 1; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-1; j++) { for (int i = 1; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-1; i++) { // This is the dominant computation for each array section per core. The compiler will use the // user's code to derive the code that will be put here. 
#if 0 printf ("p= %d Indexing 3D array (i,j,k) = (%d,%d,%d) \n",p,i,j,k); #endif #if 0 arraySection[index3D(i,j,k)] = (old_arraySection[index3D(i-1,j-1,k-1)] + old_arraySection[index3D(i+1,j-1,k-1)] + old_arraySection[index3D(i-1,j+1,k-1)] + old_arraySection[index3D(i+1,j+1,k-1)] + old_arraySection[index3D(i-1,j-1,k+1)] + old_arraySection[index3D(i+1,j-1,k+1)] + old_arraySection[index3D(i-1,j+1,k+1)] + old_arraySection[index3D(i+1,j+1,k+1)]) / 8.0; #endif } } } } #endif } else { #if 0 printf ("3D array too small (still no interior) \n"); #endif } } else { if (arraySizeZ == 2) { #if 0 printf ("3D array (with size 2 in Z axis) too small (still no interior) \n"); #endif } else { if (arraySizeY > 2) { if (arraySizeX > 2) { // This is the case of 2D relaxation (along edges) #if 0 printf ("This is the case of 2D relaxation \n"); printf ("This needs to use sectionSize[0-1] to get the local size instead of the global size! \n"); #endif #if 1 // The core array may higher dimensional then the array and if so then the local size along // the Z axis may be zero. If so, then we don't want to process the local array section. // if ((core->coreArrayNeighborhoodSizes_2D[1][1][0] >= 2 || core->coreArrayNeighborhoodSizes_2D[1][1][1] >= 2) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1) if ((core->coreArrayNeighborhoodSizes_2D[1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_2D[1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1) { // Handle the internal boundary equations along edges of the 2D arrays. // *************************************** // Now process the edges along the X axis. // *************************************** // if (sectionSize[1] > 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { #if 0 printf ("-- leftEdgeSection[1] = %s rightEdgeSection[1] = %s \n",leftEdgeSection[1] ? "true" : "false",rightEdgeSection[1] ? 
"true" : "false"); #endif // if (leftEdgeSection[1] == true) if (core->boundaryCore_2D[1][0] == true) { #if 0 printf ("--- Apply the 2D array abstraction's UPPER boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. #if 0 printf ("apply 2D equation at left edge of memory segment core->coreArrayNeighborhoodSizes_2D[0][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[0][1][1]); #endif // if (previous_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0) { // Upper edge // ***** | ****** | ***** // ---------------------- // ***** | *XXXX* | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][previous_sectionSize[0]-1] + old_arraySection[1]) / 2.0; // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { // arraySection[index2D(i,0)] = (old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(i-1,previous_sectionSize[1]-1)] + old_arraySection[index2D(i-1,1)] + // old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(i+1,previous_sectionSize[1]-1)] + old_arraySection[index2D(i+1,1)]) / 4.0; arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(i,1)] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; } } } // if (rightEdgeSection[1] == true) if (core->boundaryCore_2D[1][1] == true) { #if 0 printf ("--- Apply the array abstraction's LOWER boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply 2D equation at right edge of memory segment core->coreArrayNeighborhoodSizes_2D[2][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[2][1][1]); #endif // if (next_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[2][1][1] > 0) { // Lower edge // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | *XXXX* | ***** // ---------------------- // ***** | ****** | ***** // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; } } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[1] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { #if 0 printf ("--- Trivial case of only one 2D equation (define this to be UPPER edge) \n"); printf ("--- core->boundaryCore_2D[1][0] = %s core->boundaryCore_2D[1][1] = %s \n",core->boundaryCore_2D[1][0] ? "true" : "false",core->boundaryCore_2D[1][1] ? "true" : "false"); #endif // if (leftEdgeSection[1] == false && rightEdgeSection[1] == false) if (core->boundaryCore_2D[1][0] == false && core->boundaryCore_2D[1][1] == false) { // This is where user specific code is places within the compiler transformation. 
// if (previous_sectionSize[1] > 0 && next_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0 && core->coreArrayNeighborhoodSizes_2D[2][1][1] > 0) { // Upper and Lower edges are the same // ***** | ****** | ***** // ---------------------- // ***** | *XXXX* | ***** // ---------------------- // ***** | ****** | ***** #if 0 printf ("--- Processing trivial case of only one equation 2D (edge in X axis) \n"); #endif // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; } } } } else { // assert(sectionSize[1] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][1] == 0); #if 0 printf ("--- core->coreArrayNeighborhoodSizes_2D[1][1][1] == 0: This is the trival case \n"); #endif } } #if 1 // *************************************** // Now process the edges along the Y axis. // *************************************** #if 0 printf ("---+++ Process the edges of the memory section on core index = %d sectionSize[0] = %d previous_sectionSize[0] = %d next_sectionSize[0] = %d \n",p,sectionSize[0],previous_sectionSize[0],next_sectionSize[0]); #endif // if (sectionSize[0] > 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 1) { #if 0 printf ("---+++ leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? 
"true" : "false"); #endif // if (leftEdgeSection[0] == true) if (core->boundaryCore_2D[0][0] == true) { #if 0 printf ("---+++ Apply the array abstraction's LEFT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. // center_stencil_cell_leftEdge = (left_stencil_cell_leftEdge + right_stencil_cell_leftEdge) / 2.0; #if 0 printf ("apply equation at left edge of memory segment core->coreArrayNeighborhoodSizes_2D[1][0][0] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][0][0]); #endif // if (previous_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0) { // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | X***** | ***** // ***** | X***** | ***** // ***** | X***** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { #if 1 arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,j)]) / 4.0; #endif } } } // if (rightEdgeSection[0] == true) if (core->boundaryCore_2D[0][1] == true) { #if 0 printf ("---+++ Apply the array abstraction's RIGHT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply equation at right edge of memory segment core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][2][0]); #endif // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | *****X | ***** // ***** | *****X | ***** // ***** | *****X | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { // arraySection[index2D(sectionSize[0]-1,j)] = (old_arraySection[index2D(sectionSize[0]-2,j-1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j-1)] + // old_arraySection[index2D(sectionSize[0]-2,j+1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j+1)]) / 4.0; #if 0 printf ("array[Y][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)]); printf ("array[Y-1][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)]); printf ("array[Y+1][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)]); printf ("array[Y][X-1]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)]); printf ("p = %d core->coreArrayNeighborhoodLinearized_2D[1][2] = %d \n",p,core->coreArrayNeighborhoodLinearized_2D[1][2]); printf ("p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d 
\n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0]); printf ("p = %d core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n",p,core->coreArrayNeighborhoodSizes_2D[1][2][0]); // printf ("array[Y][X+1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)] = %f \n",old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]); printf ("array[Y][X+1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])] = %f \n", old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]); #endif #if 1 // This fails for some random problem... arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[0] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1) { #if 0 printf ("---+++ Trivial case of only one equation (define this to be left edge; use the associated references) \n"); printf ("---+++ leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? 
"true" : "false"); #endif // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false) { // This is where user specific code is places within the compiler transformation. // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0 && core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // ***** | * | ***** // ---------------------- // ***** | * | ***** // ***** | X | ***** // ***** | X | ***** // ***** | X | ***** // ***** | * | ***** // ---------------------- // ***** | * | ***** #if 0 printf ("---+++ Processing trivial case of only one equation \n"); #endif // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { // arraySection[index2D(0,j)] = (old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(previous_sectionSize[0]-1,j-1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j-1)] + // old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(previous_sectionSize[0]-1,j+1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j+1)]) / 4.0; #if 1 arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]) / 4.0; /* array[Y][X+1] */ 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } } else { // assert(sectionSize[0] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 0); #if 0 printf ("---+++ core->coreArrayNeighborhoodSizes_2D[1][0][0] == 0: This is the trival case \n"); #endif } } // ******************** // End of Y Axis update // ******************** #endif #if 1 // ******************************************** // Now process the corners of the X and Y axis. // ******************************************** #if 0 printf ("---+++ Process the edges of the memory section on core p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d core->coreArrayNeighborhoodSizes_2D[1][0][0] = %d core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n", p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][0][0],core->coreArrayNeighborhoodSizes_2D[1][2][0]); printf ("Sizes of current processor: core->coreArrayNeighborhoodSizes_2D[1][1] = (%d,%d,%d) \n",core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // First X Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 1) { // Left sice corners if (core->boundaryCore_2D[0][0] == true) { // processor boundary condition enforced here (X axis) } else { if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0) { // Next Y Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { // Upper corner if (core->boundaryCore_2D[1][0] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][0] > 0); assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); // Upper left corner // ***** | ****** | ***** // ---------------------- // ***** | X***** | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** 
#if 1 arraySection[index2D(0,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(1,0)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(0,1)]) / 4.0; #endif } // Lower corner if (core->boundaryCore_2D[1][1] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); assert (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0); // Lower left corner // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | X***** | ***** // ---------------------- // ***** | ****** | ***** #if 0 printf ("--- array[Y][X]: arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); printf ("old_array[Y][X]: old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); printf ("old_array[Y-1][X]: old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] = %f \n",old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)]); printf ("old_array[Y+1][X]: 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)] = %f \n",old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)]); printf ("old_array[Y][X-1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)] = %f \n", old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)]); printf ("array[Y][X+1]: old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); #endif #if 1 arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; #endif #if 0 printf ("--- array[Y][X]: arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); #endif } } else { // printf ("core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 
1) { // Case of upper and lower left corners are the same point // ***** | ****** | ***** // ---------------------- // ***** | X***** | ***** // ---------------------- // ***** | ****** | ***** #if 1 arraySection[index2D(0,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,0)]) / 4.0; #endif } } } else { printf ("We don't support the size on the adjacent being zero! \n"); assert(false); } } // Right side corners if (core->boundaryCore_2D[0][1] == true) { // Can we test if this is realy a boundary? } else { // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // printf ("Right boundary corner not implemented! 
\n"); // Next Y Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { // Upper corner if (core->boundaryCore_2D[1][0] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][0] > 0); assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); // Upper right corner // ***** | ****** | ***** // ---------------------- // ***** | *****X | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** #if 1 arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } // Lower corner if (core->boundaryCore_2D[1][1] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); assert (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0); // Lower right corner // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | *****X | ***** // ---------------------- // ***** | ****** | 
***** #if 1 arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,core->coreArrayNeighborhoodSizes_2D[2][1][1]-1)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[2][1][1]-1,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } else { // printf ("core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // Case of upper and lower right corners are the same point // ***** | ****** | ***** // ---------------------- // ***** | *****X | ***** // ---------------------- // ***** | ****** | ***** #if 1 arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } else { printf ("We don't support the size on the adjacent being zero! \n"); assert(false); } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). 
#if 0 printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][0] == %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][0]); printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][1] == %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); #endif // assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1); // assert(core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1); // if (sectionSize[0] == 1) // if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1 && core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1 && core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1\n"); // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) // if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false) if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false && core->boundaryCore_2D[1][0] == false && core->boundaryCore_2D[1][1] == false) { // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0 && core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // printf ("Case of single point boundary not implemented! 
\n"); // ***** | * | ***** // ----------------- // ***** | X | ***** // ----------------- // ***** | * | ***** #if 1 arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } #if 0 printf ("Exiting as a test! \n"); assert(false); #endif } } else { // assert(sectionSize[0] == 0); if (core->coreArrayNeighborhoodSizes_2D[1][1][0] != 0) { #if 0 printf ("Warning: p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0]); #endif } // assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] <= 1); #if 0 printf ("---+++ core->coreArrayNeighborhoodSizes_2D[1][0][0] == 0: This is the trival case \n"); #endif } } // ************************************************** // End of processing the corners of the X and Y axis. 
// ************************************************** #endif } else { #if 0 printf ("This array segment can't be processed for edge handling because it is too small in at least one axis: p = %d size = (%d,%d,%d) \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // assert(false); } #endif } else { #if 0 printf ("2D array too small (still no interior) \n"); #endif } } else { if (arraySizeY == 2) { #if 0 printf ("2D array (with size 2 in Y axis) too small (still no interior) \n"); #endif } else { if (arraySizeX > 2) { // This is the case of 1D relaxation #if 0 printf ("--- This is the case of 1D relaxation sectionSize[0] = %d \n",sectionSize[0]); #endif // The core array may higher dimensional then the array and if so then the local size along either // the Y or Z axis may be zero. If so, then we don't want to process the local array section. // if (sectionSize[1] == 1 && sectionSize[2] == 1) // if (sectionSize[0] > 0 && ((sectionSize[1] == 1 && sectionSize[2] == 1) || array.get_tableBasedDistribution() == false)) // if (sectionSize[0] > 0 && (sectionSize[1] == 1 && sectionSize[2] == 1) ) if (core->coreArrayNeighborhoodSizes_1D[1][0] > 0 && (core->coreArrayNeighborhoodSizes_1D[1][1] == 1 && core->coreArrayNeighborhoodSizes_1D[1][2] == 1) ) { #if 0 printf ("--- Process the edges of the memory section on core index = %d sectionSize[0] = %d previous_sectionSize[0] = %d next_sectionSize[0] = %d \n",p,sectionSize[0],previous_sectionSize[0],next_sectionSize[0]); #endif // if (sectionSize[0] > 1) if (core->coreArrayNeighborhoodSizes_1D[1][0] > 1) { #if 0 printf ("-- leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? 
"true" : "false"); #endif // if (leftEdgeSection[0] == true) if (core->boundaryCore_1D[0] == true) { #if 0 printf ("--- Apply the array abstraction's LEFT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. // center_stencil_cell_leftEdge = (left_stencil_cell_leftEdge + right_stencil_cell_leftEdge) / 2.0; #if 0 printf ("apply equation at left edge of memory segment previous_sectionSize[0] = %d \n",previous_sectionSize[0]); #endif // if (previous_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[0][0] > 0) { // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySection[1]) / 2.0; arraySection[0] = (old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[0]][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySection[1]) / 2.0; } } // if (rightEdgeSection[0] == true) if (core->boundaryCore_1D[1] == true) { #if 0 printf ("--- Apply the array abstraction's RIGHT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. // center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply equation at right edge of memory segment next_sectionSize[0] = %d \n",next_sectionSize[0]); #endif // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[2][0] > 0) { // arraySection[sectionSize[0]-1] = (old_arraySection[sectionSize[0]-2] + old_arraySectionPointers[next_coreIndexInLinearArray][0]) / 2.0; arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-1] = (old_arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-2] + old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[2]][0]) / 2.0; } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). 
// if (sectionSize[0] == 1) if (core->coreArrayNeighborhoodSizes_1D[1][0] == 1) { #if 0 printf ("--- Trivial case of only one equation (define this to be left edge; use the associated references) \n"); printf ("--- leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) if (core->boundaryCore_1D[0] == false && core->boundaryCore_1D[1] == false) { // This is where user specific code is places within the compiler transformation. // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[0][0] > 0 && core->coreArrayNeighborhoodSizes_1D[2][0] > 0) { #if 0 printf ("--- Processing trivial case of only one equation \n"); #endif // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][previous_sectionSize[0]-1] + old_arraySectionPointers[next_coreIndexInLinearArray][0]) / 2.0; arraySection[0] = (old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[0]][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[2]][0]) / 2.0; } } } else { // assert(sectionSize[0] == 0); assert(core->coreArrayNeighborhoodSizes_1D[1][0] == 0); #if 0 printf ("--- sectionSize[0] == 0: This is the trival case \n"); #endif } } } else { #if 0 printf ("--- The local size for this arraySection is zero in either the Y or Z axis sectionSize[1] = %d sectionSize[2] = %d \n",sectionSize[1],sectionSize[2]); #endif } } else { // This is array does not have an interior upon which to relax. 
#if 0 printf ("--- 1D array too small (still no interior) \n"); #endif } } } } } #endif #if 0 array.display("after relaxation on memory section edges: array"); old_array.display("after relaxation on memory section edges: old_array"); #endif } // ********************************************************************* // Original boundary relaxation without Halo regions in local memory template <typename T> void relax_on_boundary_simplified( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist ) { const int arraySizeX = array.get_arraySize(0); const int arraySizeY = array.get_arraySize(1); const int arraySizeZ = array.get_arraySize(2); int p = coreID; Core<T>* core = array.coreArray[coreID]; // This lifts out loop invariant portions of the code. T** arraySectionPointers = array.get_arraySectionPointers(); T** old_arraySectionPointers = old_array.get_arraySectionPointers(); assert(arraySectionPointers != NULL); assert(old_arraySectionPointers != NULL); T* arraySection = array.get_arraySectionPointers()[p]; T* old_arraySection = old_array.get_arraySectionPointers()[p]; #if 0 printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection); #endif assert(arraySection != NULL); assert(old_arraySection != NULL); // ************************************************************** // Fixup internal bounaries of the memory allocated to each core. 
// ************************************************************** #if 0 printf ("Fixup boundaries: p = %d Array size (%d,%d,%d) sectionSize(%d,%d,%d) coreArray(%d,%d,%d) \n",p,arraySizeX,arraySizeY,arraySizeZ,sectionSize[0],sectionSize[1],sectionSize[2],array.get_coreArraySize(0),array.get_coreArraySize(1),array.get_coreArraySize(2)); #endif if (arraySizeZ > (2*dist)) { if (arraySizeY > (2*dist) && arraySizeX > (2*dist)) { // This is the case of 3D relaxation #if 0 printf ("This is the case of 3D relaxation \n"); // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core). printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! \n"); #endif if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 1) { int base_X = (core->boundaryCore_3D[0][0] == true) ? dist : 0; int bound_X = (core->boundaryCore_3D[0][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][0] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][0]; int base_Y = (core->boundaryCore_3D[1][0] == true) ? dist : 0; int bound_Y = (core->boundaryCore_3D[1][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][1] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][1]; int base_Z = (core->boundaryCore_3D[2][0] == true) ? dist : 0; int bound_Z = (core->boundaryCore_3D[2][1] == true) ? 
core->coreArrayNeighborhoodSizes_3D[1][1][1][2] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][2]; for (int k = base_Z; k < bound_Z; k++) { for (int j = base_Y; j < bound_Y; j++) { for (int i = base_X; i < bound_X; i++) { if((i >= dist) && (i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist) && (j >= dist) && (j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist) && (k >= dist) && (k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist)) continue; T tmp(0.0); for(int d=1; d <=dist; d++) { tmp += ( /* array[Z][Y][X-d] */ ((i-d < 0) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]+(i-d),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(i-d,j,k)]) + /* array[Z][Y][X+d] */ ((i+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][0]) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((i+d-core->coreArrayNeighborhoodSizes_3D[1][1][2][0]),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(i+d,j,k)]) + /* array[Z][Y-d][X] */ ((j-d < 0) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]+(j-d),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,j-d,k)]) + /* array[Z][Y+d][X] */ ((j+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][1]) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(j+d-core->coreArrayNeighborhoodSizes_3D[1][2][1][2]),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,j+d,k)]) + /* array[Z-d][Y][X] */ ((k-d < 0) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]+(k-d),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,j,k-d)]) + /* array[Z+d][Y][X] */ ((k+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][2]) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,j,(k+d-core->coreArrayNeighborhoodSizes_3D[2][1][1][2]),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,j,k+d)]) ); } arraySection[index3D(i,j,k)] = tmp / (6.0*dist); } } } } } } } // ********************************************************************* // New boundary relaxation with DetachedHalo regions in local memory // This is applied only when user construct MulticoreArray with postive halo region size. template <typename T> void relax_on_detachedhalo_boundary( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist ) { const int arraySizeX = array.get_arraySize(0); const int arraySizeY = array.get_arraySize(1); const int arraySizeZ = array.get_arraySize(2); int p = coreID; Core<T>* core = array.coreArray[coreID]; // This lifts out loop invariant portions of the code. 
T** arraySectionPointers = array.get_arraySectionPointers(); T** old_arraySectionPointers = old_array.get_arraySectionPointers(); assert(arraySectionPointers != NULL); assert(old_arraySectionPointers != NULL); T* arraySection = array.get_arraySectionPointers()[p]; T* old_arraySection = old_array.get_arraySectionPointers()[p]; #if 0 printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection); #endif assert(arraySection != NULL); assert(old_arraySection != NULL); #if 1 // ************************************************************** // Fixup internal bounaries of the memory allocated to each core. // ************************************************************** #if 0 printf ("Fixup boundaries: p = %d Array size (%d,%d,%d) sectionSize(%d,%d,%d) coreArray(%d,%d,%d) \n",p,arraySizeX,arraySizeY,arraySizeZ,sectionSize[0],sectionSize[1],sectionSize[2],array.get_coreArraySize(0),array.get_coreArraySize(1),array.get_coreArraySize(2)); #endif if (arraySizeZ > (2*dist)) { if (arraySizeY > (2*dist) && arraySizeX > (2*dist)) { // This is the case of 3D relaxation #if 0 printf ("This is the case of 3D relaxation \n"); // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core). printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! 
\n"); #endif if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 1) { T** old_haloXBottom = old_array.get_haloSectionPointers(0,0); T** old_haloXTop = old_array.get_haloSectionPointers(0,1); T** old_haloYBottom = old_array.get_haloSectionPointers(1,0); T** old_haloYTop = old_array.get_haloSectionPointers(1,1); T** old_haloZBottom = old_array.get_haloSectionPointers(2,0); T** old_haloZTop = old_array.get_haloSectionPointers(2,1); // *************************************** // Now process the edges along the X axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1)) { if (core->boundaryCore_3D[1][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's UPPER boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idx) ? old_haloYBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-(d-idx)),k,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,(idx-d),k)] ) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,(idx+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idx,k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idx,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idx,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idx,k+d)]); else tmp += ( /* array[Z][Y-1][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d - idx),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,(idx-d),k)] ) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,(idx+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idx,k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idx,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idx,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idx,k+d)]); } arraySection[index3D(i,idx,k)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[1][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's BOTTOM boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID]) tmp += (/* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idx) ? 
old_haloYTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(d-idx-1),k,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k+d)]); else tmp += (/* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idx-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k+d)]); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idx+1),k)] = tmp / (6.0*dist); } } } } } } else { /**TODO: adding special case for X size or Z size is only 1**/ } // *************************************** // Now process the edges along the Y axis // *************************************** if 
((core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1)) { if (core->boundaryCore_3D[0][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's LEFT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idx,j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idx,j+d,k)] + /* array[Z][Y][X-1] */ ((d>idx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idx)),j,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idx-d,j,k)])+ /* array[Z][Y][X+1] */ old_arraySection[index3D(idx+d,j,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idx,j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idx,j,k+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idx,j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idx,j+d,k)] + /* array[Z][Y][X-1] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idx),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idx-d,j,k)])+ /* array[Z][Y][X+1] */ old_arraySection[index3D(idx+d,j,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idx,j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idx,j,k+d)]); } arraySection[index3D(idx,j,k)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[0][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's RIGHT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j+d,k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1+d),j,k)] + /* array[Z][Y][X+1] */ ((d>idx) ? 
old_haloXTop[coreID][otherCore_index3D((d-idx-1),j,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1-d),j,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j-d,k)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j+d,k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1+d),j,k)] + /* array[Z][Y][X+1] */ ((d>idx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idx-1),j,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1-d),j,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k+d)]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idx+1),j,k)] = tmp / (6.0*dist); } } } } } } else { } // *************************************** // Now process the edges along the Z axis // *************************************** if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) && (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1)) { if (core->boundaryCore_3D[2][0] == true) { #if 0 printf ("--- Apply the 3D array abstraction's LEFT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0) { for (int j = dist; j < 
core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,idx)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,idx)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,idx)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,idx)] + /* array[Z-1][Y][X] */ ((d>idx) ? old_haloZBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+old_array.get_haloWidth(1)),(old_array.get_haloWidth(2)-(d-idx)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,j,idx-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,j,idx+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,idx)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,idx)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,idx)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,idx)] + /* array[Z-1][Y][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idx),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,j,idx-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,j,idx+d)]); } arraySection[index3D(i,j,idx)] = tmp / (6.0*dist); } } } } } if (core->boundaryCore_3D[2][1] == true) { #if 0 printf ("--- Apply the 3D array abstraction's RIGHT boundary condition \n"); #endif } else { if (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idx=0; idx <dist; idx++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1+d))] + /* array[Z+1][Y][X] */ ((d>idx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,j,(d-idx-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1+d))] + /* array[Z+1][Y][X] */ ((d>idx) ? old_haloZTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+old_array.get_haloWidth(1)),(d-idx-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1-d))])); } arraySection[index3D(i,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idx+1))] = tmp / (6.0*dist); } } } } } } else { } // ******************** // End of plane updates // ******************** // ******************** // Edge updates along X axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if ((core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if 
(core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-(d-idxy)),idxz,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,idxy-d,idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),old_array.get_haloWidth(1)+idxy,(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,idxy,idxz-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idxy,idxz+d)]); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,idxy-d,idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,idxy,idxz-d)]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,idxy,idxz+d)]); } arraySection[index3D(i,idxy,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_haloYBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-(d-idxy)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,idxy-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)+idxy),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(i,idxy-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(i,idxy+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(i,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { 
if(old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_haloYTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(d-idxy-1),idxz,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[1][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int i = dist; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist; i++) { for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ 
old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_haloYTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(d-idxy-1),(core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[1][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(i,(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(i-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ old_arraySection[index3D(i+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(i,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // Edge updates along Y axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if 
(core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),j,idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)+idxx),(j+old_array.get_haloWidth(1)),(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,j,(idxz+d))]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),j,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,j,(idxz+d))]); } arraySection[index3D(idxx,j,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),j,(core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(j+old_array.get_haloWidth(1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),j,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,j,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),j,idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_haloZBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(j+old_array.get_haloWidth(1)),(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz+d))]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,idxz)] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),j,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,(idxz+d))]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,idxz)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for (int j = dist; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist; j++) { for(int idxx=0; idxx <dist; idxx++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? 
old_haloXTop[coreID][otherCore_index3D((d-idxx-1),j,(core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(j+old_array.get_haloWidth(1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),j,core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),j,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),j,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // Edge updates along Z axis // ******************** if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { 
if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(old_array.get_haloWidth(1)-(d-idxy)),k,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,idxy-d,k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,idxy+d,k)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),idxy,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,k+d)]); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,idxy-d,k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,idxy+d,k)] + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,k+d)]); } arraySection[index3D(idxx,idxy,k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d > idxy) ? old_haloYTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(d-idxy-1),k,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ ((d > idxx) ? 
old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d > idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ ((d > idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),k,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_haloYBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(old_array.get_haloWidth(1)-(d-idxy)),k,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),idxy,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k+d)]) ; else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),k,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),k)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),k)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k+d)]) ; } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true)) { // processor boundary condition enforced here (YZ bottom corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { for (int k = dist; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist; k++) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_haloYTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(d-idxy-1),k,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),(core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1)),k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),k)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),k,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),k)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),k,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k-d)] + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k+d)]); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),k)] = tmp / (6.0*dist); } } } else { } } else { } } else { } } } else { } // ******************** // End of edge updates // ******************** // ******************** // corners updates // ******************** if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && 
(core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(old_array.get_haloWidth(1)-(d-idxy)),idxz,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),idxy,idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,old_array.get_haloWidth(1)+idxy,(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(0)))] : old_arraySection[index3D(idxx,idxy,(idxz-d))] ) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,(idxz+d))] ) ; else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),idxz)] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,idxy,(idxz-d))] ) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,idxy,(idxz+d))] ) ; } arraySection[index3D(idxx,idxy,idxz)] = tmp / (6.0*dist) ; } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int 
d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(old_array.get_haloWidth(1)-(d-idxy)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,old_array.get_haloWidth(1)+idxy,(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(idxx,(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(idxx,(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_haloYTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(d-idxy-1),idxz,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),(core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1)),idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idxx-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D(idxx+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),old_array.get_haloWidth(2)-(d-idxz),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),idxz,core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(idxx-d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z][Y][X+1] */ old_arraySection[index3D(idxx+d,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z-1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))]); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][0] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][0][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { 
if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_haloYTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(d-idxy-1),(core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))]) + /* array[Z][Y][X-1] */ ((d>idxx) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)-(d-idxx)),(core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1)),(core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_haloZTop[coreID][otherCore_index3D(old_array.get_haloWidth(0)+idxx,(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(idxx,(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxz+1))]) + /* array[Z][Y][X-1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][0]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][1][0][0]-(d-idxx),core->coreArrayNeighborhoodSizes_3D[1][1][0][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][0][0],core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D((idxx-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X+1] */ old_arraySection[index3D((idxx+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z-1][Y][X] */ old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(idxx,core->coreArrayNeighborhoodSizes_3D[2][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(idxx,core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if 
(core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_haloYBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(old_array.get_haloWidth(1)-(d-idxy)),idxz,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),idxy,idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_haloZBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),old_array.get_haloWidth(1)+idxy,(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz+d))] ); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),idxz,core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),idxz)]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),idxz)] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,(idxz+d))] ); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][0] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][0][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_haloYBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(old_array.get_haloWidth(1)-(d-idxy)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),idxy,(core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_haloZTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),old_array.get_haloWidth(1)+idxy,(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][0][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][1]-(d-idxy),core->coreArrayNeighborhoodSizes_3D[1][0][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][0][1][0],core->coreArrayNeighborhoodSizes_3D[1][0][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y+1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),(idxy+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),idxy,(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),idxy,core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][0] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[0][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_haloYTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(d-idxy-1),idxz,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),(core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1)),idxz,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? old_haloZBottom[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(old_array.get_haloWidth(2)-(d-idxz)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))] ); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),idxz)] + /* array[Z][Y+1][X] */ ((d>idxy) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),idxz,core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),idxz)]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),idxz,core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)]) + /* array[Z-1][Y][X] */ ((d>idxz) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[0][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[0][1][1][2]-(d-idxz),core->coreArrayNeighborhoodSizes_3D[0][1][1][0],core->coreArrayNeighborhoodSizes_3D[0][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz-d))]) + /* array[Z+1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),(idxz+d))] ); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),idxz)] = tmp / (6.0*dist); } } else { } } else { } } else { } } } if ((core->boundaryCore_3D[0][1] == true) || (core->boundaryCore_3D[1][1] == true) || (core->boundaryCore_3D[2][1] == true)) { // processor boundary condition enforced here (YZ upper corner) } else { if ((core->coreArrayNeighborhoodSizes_3D[1][1][2][0] > 0) && (core->coreArrayNeighborhoodSizes_3D[1][2][1][1] > 0) && (core->coreArrayNeighborhoodSizes_3D[2][1][1][2] > 0)) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 1) { if (core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 1) { for(int idxx=0; idxx <dist; idxx++) for(int idxy=0; idxy <dist; idxy++) for(int idxz=0; idxz <dist; idxz++) { T tmp(0.0); for(int d=1; d <=dist; d++) { if(old_array.get_haloSectionSizes(0)[coreID] && old_array.get_haloSectionSizes(1)[coreID] && old_array.get_haloSectionSizes(2)[coreID]) tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ 
((d>idxy) ? old_haloYTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(d-idxy-1),(core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1)),(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? old_haloXTop[coreID][otherCore_index3D((d-idxx-1),(core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1)),(core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1)),old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? 
old_haloZTop[coreID][otherCore_index3D((core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+old_array.get_haloWidth(0)-(idxx+1)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+old_array.get_haloWidth(1)-(idxy+1)),(d-idxz-1),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(0)))] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); else tmp += ( /* array[Z][Y-1][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y+1][X] */ ((d>idxy) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][2][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]-(idxx+1),(d-idxy-1),core->coreArrayNeighborhoodSizes_3D[1][2][1][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][2][1][0],core->coreArrayNeighborhoodSizes_3D[1][2][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z][Y][X-1] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1+d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] + /* array[Z][Y][X+1] */ ((d>idxx) ? 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[1][1][2]][otherCore_index3D((d-idxx-1),core->coreArrayNeighborhoodSizes_3D[1][1][2][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][2]-(idxz+1),core->coreArrayNeighborhoodSizes_3D[1][1][2][0],core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1-d),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))]) + /* array[Z-1][Y][X] */ old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1+d))] + /* array[Z+1][Y][X] */ ((d>idxz) ? old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_3D[2][1][1]][otherCore_index3D(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[2][1][1][1]-(idxy+1),(d-idxz-1),core->coreArrayNeighborhoodSizes_3D[2][1][1][0],core->coreArrayNeighborhoodSizes_3D[2][1][1][1])] : old_arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1-d))])); } arraySection[index3D(core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-(idxx+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-(idxy+1),core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-(idxz+1))] = tmp / (6.0*dist); } } else { } } else { } } else { } } } // ******************** // End of corner updates // ******************** } else { #if 0 printf ("This array segment can't be processed for edge handling because it is too small in at least one axis: p = %d size = (%d,%d,%d) \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // assert(false); } #if 0 // This is required to avoid valgrind 
reported errors on some blocks where the local (sectionSize[dim]) is zero. // This is likely because of over flow from size_t type veraibles. if (core->coreArrayNeighborhoodSizes_3D[1][1][1][0] > 2 && core->coreArrayNeighborhoodSizes_3D[1][1][1][1] > 2 && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] > 2) { for (int k = 1; k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-1; k++) { for (int j = 1; j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-1; j++) { for (int i = 1; i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-1; i++) { // This is the dominant computation for each array section per core. The compiler will use the // user's code to derive the code that will be put here. #if 0 printf ("p= %d Indexing 3D array (i,j,k) = (%d,%d,%d) \n",p,i,j,k); #endif #if 0 arraySection[index3D(i,j,k)] = (old_arraySection[index3D(i-1,j-1,k-1)] + old_arraySection[index3D(i+1,j-1,k-1)] + old_arraySection[index3D(i-1,j+1,k-1)] + old_arraySection[index3D(i+1,j+1,k-1)] + old_arraySection[index3D(i-1,j-1,k+1)] + old_arraySection[index3D(i+1,j-1,k+1)] + old_arraySection[index3D(i-1,j+1,k+1)] + old_arraySection[index3D(i+1,j+1,k+1)]) / 8.0; #endif } } } } #endif } else { #if 0 printf ("3D array too small (still no interior) \n"); #endif } } else { if (arraySizeZ == 2) { #if 0 printf ("3D array (with size 2 in Z axis) too small (still no interior) \n"); #endif } else { if (arraySizeY > 2) { if (arraySizeX > 2) { // This is the case of 2D relaxation (along edges) T** old_haloXBottom = old_array.get_haloSectionPointers(0,0); T** old_haloXTop = old_array.get_haloSectionPointers(0,1); T** old_haloYBottom = old_array.get_haloSectionPointers(1,0); T** old_haloYTop = old_array.get_haloSectionPointers(1,1); #if 0 printf ("This is the case of 2D relaxation \n"); printf ("This needs to use sectionSize[0-1] to get the local size instead of the global size! 
\n"); #endif #if 1 // The core array may higher dimensional then the array and if so then the local size along // the Z axis may be zero. If so, then we don't want to process the local array section. // if ((core->coreArrayNeighborhoodSizes_2D[1][1][0] >= 2 || core->coreArrayNeighborhoodSizes_2D[1][1][1] >= 2) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1) if ((core->coreArrayNeighborhoodSizes_2D[1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_2D[1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_2D[1][1][2] == 1) { // Handle the internal boundary equations along edges of the 2D arrays. // *************************************** // Now process the edges along the X axis. // *************************************** // if (sectionSize[1] > 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { #if 0 printf ("-- leftEdgeSection[1] = %s rightEdgeSection[1] = %s \n",leftEdgeSection[1] ? "true" : "false",rightEdgeSection[1] ? "true" : "false"); #endif // if (leftEdgeSection[1] == true) if (core->boundaryCore_2D[1][0] == true) { #if 0 printf ("--- Apply the 2D array abstraction's UPPER boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
#if 0 printf ("apply 2D equation at left edge of memory segment core->coreArrayNeighborhoodSizes_2D[0][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[0][1][1]); #endif // if (previous_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0) { // Upper edge // ***** | ****** | ***** // ---------------------- // ***** | *XXXX* | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][previous_sectionSize[0]-1] + old_arraySection[1]) / 2.0; // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { // arraySection[index2D(i,0)] = (old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(i-1,previous_sectionSize[1]-1)] + old_arraySection[index2D(i-1,1)] + // old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(i+1,previous_sectionSize[1]-1)] + old_arraySection[index2D(i+1,1)]) / 4.0; if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]]) arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_arraySection[index2D(i,1)] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; else arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(i,1)] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; } } } // if (rightEdgeSection[1] == true) if 
(core->boundaryCore_2D[1][1] == true) { #if 0 printf ("--- Apply the array abstraction's LOWER boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. // center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply 2D equation at right edge of memory segment core->coreArrayNeighborhoodSizes_2D[2][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[2][1][1]); #endif // if (next_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[2][1][1] > 0) { // Lower edge // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | *XXXX* | ***** // ---------------------- // ***** | ****** | ***** // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]]) arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D(i+old_array.get_haloWidth(0),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; else arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(i,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + /* array[Y][X+1] */ 
old_arraySection[index2D(i+1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; } } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[1] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { #if 0 printf ("--- Trivial case of only one 2D equation (define this to be UPPER edge) \n"); printf ("--- core->boundaryCore_2D[1][0] = %s core->boundaryCore_2D[1][1] = %s \n",core->boundaryCore_2D[1][0] ? "true" : "false",core->boundaryCore_2D[1][1] ? "true" : "false"); #endif // if (leftEdgeSection[1] == false && rightEdgeSection[1] == false) if (core->boundaryCore_2D[1][0] == false && core->boundaryCore_2D[1][1] == false) { // This is where user specific code is places within the compiler transformation. // if (previous_sectionSize[1] > 0 && next_sectionSize[1] > 0) if (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0 && core->coreArrayNeighborhoodSizes_2D[2][1][1] > 0) { // Upper and Lower edges are the same // ***** | ****** | ***** // ---------------------- // ***** | *XXXX* | ***** // ---------------------- // ***** | ****** | ***** #if 0 printf ("--- Processing trivial case of only one equation 2D (edge in X axis) \n"); #endif // for (int i = 1; i < sectionSize[0]-1; i++) for (int i = 1; i < core->coreArrayNeighborhoodSizes_2D[1][1][0]-1; i++) { if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]]) arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_haloYBottom[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i+old_array.get_haloWidth(0),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ 
old_haloYTop[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i+old_array.get_haloWidth(0),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; else arraySection[index2D(i,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(i,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(i,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(i-1,0)] + /* array[Y][X+1] */ old_arraySection[index2D(i+1,0)]) / 4.0; } } } } else { // assert(sectionSize[1] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][1] == 0); #if 0 printf ("--- core->coreArrayNeighborhoodSizes_2D[1][1][1] == 0: This is the trival case \n"); #endif } } #if 1 // *************************************** // Now process the edges along the Y axis. // *************************************** #if 0 printf ("---+++ Process the edges of the memory section on core index = %d sectionSize[0] = %d previous_sectionSize[0] = %d next_sectionSize[0] = %d \n",p,sectionSize[0],previous_sectionSize[0],next_sectionSize[0]); #endif // if (sectionSize[0] > 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 1) { #if 0 printf ("---+++ leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == true) if (core->boundaryCore_2D[0][0] == true) { #if 0 printf ("---+++ Apply the array abstraction's LEFT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_leftEdge = (left_stencil_cell_leftEdge + right_stencil_cell_leftEdge) / 2.0; #if 0 printf ("apply equation at left edge of memory segment core->coreArrayNeighborhoodSizes_2D[1][0][0] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][0][0]); #endif // if (previous_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0) { // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | X***** | ***** // ***** | X***** | ***** // ***** | X***** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { #if 1 if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]]) arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + /* array[Y][X-1] */ old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),j,(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_arraySection[index2D(1,j)]) / 4.0; else arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,j)]) / 4.0; #endif } } } // if (rightEdgeSection[0] == true) if (core->boundaryCore_2D[0][1] == true) { #if 0 printf ("---+++ Apply the array abstraction's RIGHT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply equation at right edge of memory segment core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][2][0]); #endif // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | *****X | ***** // ***** | *****X | ***** // ***** | *****X | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { // arraySection[index2D(sectionSize[0]-1,j)] = (old_arraySection[index2D(sectionSize[0]-2,j-1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j-1)] + // old_arraySection[index2D(sectionSize[0]-2,j+1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j+1)]) / 4.0; #if 0 printf ("array[Y][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)]); printf ("array[Y-1][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)]); printf ("array[Y+1][X]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)]); printf ("array[Y][X-1]: old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] = %f \n",old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)]); printf ("p = %d core->coreArrayNeighborhoodLinearized_2D[1][2] = %d \n",p,core->coreArrayNeighborhoodLinearized_2D[1][2]); printf ("p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d 
\n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0]); printf ("p = %d core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n",p,core->coreArrayNeighborhoodSizes_2D[1][2][0]); // printf ("array[Y][X+1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)] = %f \n",old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]); printf ("array[Y][X+1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])] = %f \n", old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]); #endif #if 1 // This fails for some random problem... if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,j,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,j+1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,j)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } 
} else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[0] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1) { #if 0 printf ("---+++ Trivial case of only one equation (define this to be left edge; use the associated references) \n"); printf ("---+++ leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false) { // This is where user specific code is places within the compiler transformation. // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0 && core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // ***** | * | ***** // ---------------------- // ***** | * | ***** // ***** | X | ***** // ***** | X | ***** // ***** | X | ***** // ***** | * | ***** // ---------------------- // ***** | * | ***** #if 0 printf ("---+++ Processing trivial case of only one equation \n"); #endif // for (int j = 1; j < sectionSize[1]-1; j++) for (int j = 1; j < core->coreArrayNeighborhoodSizes_2D[1][1][1]-1; j++) { // arraySection[index2D(0,j)] = (old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(previous_sectionSize[0]-1,j-1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j-1)] + // old_arraySectionPointers[previous_coreIndexInLinearArray][index2D(previous_sectionSize[0]-1,j+1)] + old_arraySectionPointers[next_coreIndexInLinearArray][index2D(0,j+1)]) / 4.0; #if 1 if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + /* array[Y][X-1] */ 
old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),j,(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,j,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(0,j)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,j-1)] + /* array[Y+1][X] */ old_arraySection[index2D(0,j+1)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,j,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,j)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,j,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } } else { // assert(sectionSize[0] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 0); #if 0 printf ("---+++ core->coreArrayNeighborhoodSizes_2D[1][0][0] == 0: This is the trival case \n"); #endif } } // ******************** // End of Y Axis update // ******************** #endif #if 1 // ******************************************** // Now process the corners of the X and Y axis. 
// ******************************************** #if 0 printf ("---+++ Process the edges of the memory section on core p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d core->coreArrayNeighborhoodSizes_2D[1][0][0] = %d core->coreArrayNeighborhoodSizes_2D[1][2][0] = %d \n", p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][0][0],core->coreArrayNeighborhoodSizes_2D[1][2][0]); printf ("Sizes of current processor: core->coreArrayNeighborhoodSizes_2D[1][1] = (%d,%d,%d) \n",core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // First X Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][0] > 1) { // Left sice corners if (core->boundaryCore_2D[0][0] == true) { // processor boundary condition enforced here (X axis) } else { if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0) { // Next Y Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { // Upper corner if (core->boundaryCore_2D[1][0] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][0] > 0); assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); // Upper left corner // ***** | ****** | ***** // ---------------------- // ***** | X***** | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]]) arraySection[index2D(0,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D(0+old_array.get_haloWidth(0),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_arraySection[index2D(1,0)] + /* array[Y][X-1] */ 
old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),0,(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_arraySection[index2D(0,1)]) / 4.0; else arraySection[index2D(0,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(1,0)] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(0,1)]) / 4.0; #endif } // Lower corner if (core->boundaryCore_2D[1][1] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); assert (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0); // Lower left corner // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | X***** | ***** // ---------------------- // ***** | ****** | ***** #if 0 printf ("--- array[Y][X]: arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); printf ("old_array[Y][X]: old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); printf ("old_array[Y-1][X]: old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] = %f 
\n",old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)]); printf ("old_array[Y+1][X]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)] = %f \n",old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)]); printf ("old_array[Y][X-1]: old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)] = %f \n", old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)]); printf ("array[Y][X+1]: old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); #endif #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]]) arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D(0+old_array.get_haloWidth(0),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),(core->coreArrayNeighborhoodSizes_2D[1][0][1]-1),(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; else arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(0,0)] + // /* array[Y][X-1] */ 
old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,core->coreArrayNeighborhoodSizes_2D[1][0][1]-1,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 4.0; #endif #if 0 printf ("--- array[Y][X]: arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = %f \n",arraySection[index2D(0,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]); #endif } } else { // printf ("core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // Case of upper and lower left corners are the same point // ***** | ****** | ***** // ---------------------- // ***** | X***** | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]] && old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][0]]) arraySection[index2D(0,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D(0+old_array.get_haloWidth(0),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D(0+old_array.get_haloWidth(0),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),0,(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_arraySection[index2D(1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)]) / 
4.0; else arraySection[index2D(0,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])] + // /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0)] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySection[index2D(1,0)]) / 4.0; #endif } } } else { printf ("We don't support the size on the adjacent being zero! \n"); assert(false); } } // Right side corners if (core->boundaryCore_2D[0][1] == true) { // Can we test if this is realy a boundary? } else { // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // printf ("Right boundary corner not implemented! 
\n"); // Next Y Axis logic if (core->coreArrayNeighborhoodSizes_2D[1][1][1] > 1) { // Upper corner if (core->boundaryCore_2D[1][0] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][0] > 0); assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); // Upper right corner // ***** | ****** | ***** // ---------------------- // ***** | *****X | ***** // ***** | ****** | ***** // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D((core->coreArrayNeighborhoodSizes_2D[0][1][0]-1+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,1)] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,0,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,1)] + /* array[Y][X-1] */ 
old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } // Lower corner if (core->boundaryCore_2D[1][1] == true) { // processor boundary condition enforced here (Y axis) } else { assert (core->coreArrayNeighborhoodSizes_2D[0][1][1] > 0); assert (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0); // Lower right corner // ***** | ****** | ***** // ---------------------- // ***** | ****** | ***** // ***** | ****** | ***** // ***** | *****X | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D((core->coreArrayNeighborhoodSizes_2D[2][1][0]-1+old_array.get_haloWidth(0)),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,(core->coreArrayNeighborhoodSizes_2D[2][1][1]-1),(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] = ( /* array[Y-1][X] */ 
old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,core->coreArrayNeighborhoodSizes_2D[1][1][1]-2)] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,core->coreArrayNeighborhoodSizes_2D[1][1][1]-1)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,core->coreArrayNeighborhoodSizes_2D[2][1][1]-1)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[2][1][1]-1,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } else { // printf ("core->coreArrayNeighborhoodSizes_2D[1][1][1] = %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); if (core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // Case of upper and lower right corners are the same point // ***** | ****** | ***** // ---------------------- // ***** | *****X | ***** // ---------------------- // ***** | ****** | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D((core->coreArrayNeighborhoodSizes_2D[0][1][0]-1+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ 
old_haloYTop[coreID][otherCore_index2D((core->coreArrayNeighborhoodSizes_2D[2][1][0]-1+old_array.get_haloWidth(0)),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,0,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = // ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1)] + ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[0][1][0]-1,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + // /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0)] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[2][1][0]-1,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-2,0)] + // /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][index2D(0,0)]) / 4.0; /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 4.0; #endif } } } else { printf ("We don't support the size on the adjacent being zero! \n"); assert(false); } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). 
#if 0 printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][0] == %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][0]); printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][1] == %d \n",core->coreArrayNeighborhoodSizes_2D[1][1][1]); #endif // assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1); // assert(core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1); // if (sectionSize[0] == 1) // if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1) if (core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1 && core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1) { // printf ("Case of core->coreArrayNeighborhoodSizes_2D[1][1][0] == 1 && core->coreArrayNeighborhoodSizes_2D[1][1][1] == 1\n"); // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) // if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false) if (core->boundaryCore_2D[0][0] == false && core->boundaryCore_2D[0][1] == false && core->boundaryCore_2D[1][0] == false && core->boundaryCore_2D[1][1] == false) { // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_2D[1][0][0] > 0 && core->coreArrayNeighborhoodSizes_2D[1][2][0] > 0) { // printf ("Case of single point boundary not implemented! 
\n"); // ***** | * | ***** // ----------------- // ***** | X | ***** // ----------------- // ***** | * | ***** #if 1 if(old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[0][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][0]] && old_array.get_haloSectionSizes(1)[core->coreArrayNeighborhoodLinearized_2D[2][1]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_2D[1][2]]) arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_haloYBottom[coreID][otherCore_index2D((0+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)-1),(core->coreArrayNeighborhoodSizes_2D[0][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y+1][X] */ old_haloYTop[coreID][otherCore_index2D((0+old_array.get_haloWidth(0)),0,(core->coreArrayNeighborhoodSizes_2D[2][1][0]+2*old_array.get_haloWidth(0)))] + /* array[Y][X-1] */ old_haloXBottom[coreID][otherCore_index2D((old_array.get_haloWidth(0)-1),(old_array.get_haloWidth(1)-1),(old_array.get_haloWidth(0)))] + /* array[Y][X+1] */ old_haloXTop[coreID][otherCore_index2D(0,0,(old_array.get_haloWidth(0)))]) / 4.0; else arraySection[index2D(core->coreArrayNeighborhoodSizes_2D[1][1][0]-1,0)] = ( /* array[Y-1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[0][1]][otherCore_index2D(0,core->coreArrayNeighborhoodSizes_2D[0][1][1]-1,core->coreArrayNeighborhoodSizes_2D[0][1][0])] + /* array[Y+1][X] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[2][1]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[2][1][0])] + /* array[Y][X-1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][0]][otherCore_index2D(core->coreArrayNeighborhoodSizes_2D[1][0][0]-1,0,core->coreArrayNeighborhoodSizes_2D[1][0][0])] + /* array[Y][X+1] */ old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_2D[1][2]][otherCore_index2D(0,0,core->coreArrayNeighborhoodSizes_2D[1][2][0])]) / 
4.0; #endif } #if 0 printf ("Exiting as a test! \n"); assert(false); #endif } } else { // assert(sectionSize[0] == 0); if (core->coreArrayNeighborhoodSizes_2D[1][1][0] != 0) { #if 0 printf ("Warning: p = %d core->coreArrayNeighborhoodSizes_2D[1][1][0] = %d \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0]); #endif } // assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] == 0); assert(core->coreArrayNeighborhoodSizes_2D[1][1][0] <= 1); #if 0 printf ("---+++ core->coreArrayNeighborhoodSizes_2D[1][0][0] == 0: This is the trival case \n"); #endif } } // ************************************************** // End of processing the corners of the X and Y axis. // ************************************************** #endif } else { #if 0 printf ("This array segment can't be processed for edge handling because it is too small in at least one axis: p = %d size = (%d,%d,%d) \n",p,core->coreArrayNeighborhoodSizes_2D[1][1][0],core->coreArrayNeighborhoodSizes_2D[1][1][1],core->coreArrayNeighborhoodSizes_2D[1][1][2]); #endif // assert(false); } #endif } else { #if 0 printf ("2D array too small (still no interior) \n"); #endif } } else { if (arraySizeY == 2) { #if 0 printf ("2D array (with size 2 in Y axis) too small (still no interior) \n"); #endif } else { if (arraySizeX > 2) { // This is the case of 1D relaxation T** old_haloXBottom = old_array.get_haloSectionPointers(0,0); T** old_haloXTop = old_array.get_haloSectionPointers(0,1); #if 0 printf ("--- This is the case of 1D relaxation sectionSize[0] = %d \n",sectionSize[0]); #endif // The core array may higher dimensional then the array and if so then the local size along either // the Y or Z axis may be zero. If so, then we don't want to process the local array section. 
// if (sectionSize[1] == 1 && sectionSize[2] == 1) // if (sectionSize[0] > 0 && ((sectionSize[1] == 1 && sectionSize[2] == 1) || array.get_tableBasedDistribution() == false)) // if (sectionSize[0] > 0 && (sectionSize[1] == 1 && sectionSize[2] == 1) ) if (core->coreArrayNeighborhoodSizes_1D[1][0] > 0 && (core->coreArrayNeighborhoodSizes_1D[1][1] == 1 && core->coreArrayNeighborhoodSizes_1D[1][2] == 1) ) { #if 0 printf ("--- Process the edges of the memory section on core index = %d sectionSize[0] = %d previous_sectionSize[0] = %d next_sectionSize[0] = %d \n",p,sectionSize[0],previous_sectionSize[0],next_sectionSize[0]); #endif // if (sectionSize[0] > 1) if (core->coreArrayNeighborhoodSizes_1D[1][0] > 1) { #if 0 printf ("-- leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == true) if (core->boundaryCore_1D[0] == true) { #if 0 printf ("--- Apply the array abstraction's LEFT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_leftEdge = (left_stencil_cell_leftEdge + right_stencil_cell_leftEdge) / 2.0; #if 0 printf ("apply equation at left edge of memory segment previous_sectionSize[0] = %d \n",previous_sectionSize[0]); #endif // if (previous_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[0][0] > 0) { if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_1D[0]]) arraySection[0] = (old_haloXBottom[coreID][0] + old_arraySection[1]) / 2.0; else // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySection[1]) / 2.0; arraySection[0] = (old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[0]][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySection[1]) / 2.0; } } // if (rightEdgeSection[0] == true) if (core->boundaryCore_1D[1] == true) { #if 0 printf ("--- Apply the array abstraction's RIGHT boundary condition \n"); #endif } else { // This is where user specific code is places within the compiler transformation. 
// center_stencil_cell_rightEdge = (left_stencil_cell_rightEdge + right_stencil_cell_rightEdge) / 2.0; #if 0 printf ("apply equation at right edge of memory segment next_sectionSize[0] = %d \n",next_sectionSize[0]); #endif // if (next_sectionSize[0] > 0) if (core->coreArrayNeighborhoodSizes_1D[2][0] > 0) { if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_1D[2]]) arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-1] = (old_arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-2] + old_haloXTop[coreID][0]) / 2.0; else // arraySection[sectionSize[0]-1] = (old_arraySection[sectionSize[0]-2] + old_arraySectionPointers[next_coreIndexInLinearArray][0]) / 2.0; arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-1] = (old_arraySection[core->coreArrayNeighborhoodSizes_1D[1][0]-2] + old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[2]][0]) / 2.0; } } } else { // Trivial case of only one equation (define this to be left edge; use the associated references). // if (sectionSize[0] == 1) if (core->coreArrayNeighborhoodSizes_1D[1][0] == 1) { #if 0 printf ("--- Trivial case of only one equation (define this to be left edge; use the associated references) \n"); printf ("--- leftEdgeSection[0] = %s rightEdgeSection[0] = %s \n",leftEdgeSection[0] ? "true" : "false",rightEdgeSection[0] ? "true" : "false"); #endif // if (leftEdgeSection[0] == false && rightEdgeSection[0] == false) if (core->boundaryCore_1D[0] == false && core->boundaryCore_1D[1] == false) { // This is where user specific code is places within the compiler transformation. 
       // if (previous_sectionSize[0] > 0 && next_sectionSize[0] > 0)
          if (core->coreArrayNeighborhoodSizes_1D[0][0] > 0 && core->coreArrayNeighborhoodSizes_1D[2][0] > 0)
             {
#if 0
               printf ("--- Processing trivial case of only one equation \n");
#endif
            // arraySection[0] = (old_arraySectionPointers[previous_coreIndexInLinearArray][previous_sectionSize[0]-1] + old_arraySectionPointers[next_coreIndexInLinearArray][0]) / 2.0;
            // Single-cell section: average the two neighboring values.  The halo
            // buffers are used when they are available for this core (nonzero
            // per-core halo section size — NOTE(review): presumed meaning of
            // get_haloSectionSizes(); confirm); otherwise read the adjacent
            // cores' sections directly through the shared pointer table.
               if(old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_1D[0]] && old_array.get_haloSectionSizes(0)[core->coreArrayNeighborhoodLinearized_1D[2]])
                    arraySection[0] = (old_haloXBottom[coreID][0] + old_haloXTop[coreID][0]) / 2.0;
                 else
                    arraySection[0] = (old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[0]][core->coreArrayNeighborhoodSizes_1D[0][0]-1] + old_arraySectionPointers[core->coreArrayNeighborhoodLinearized_1D[2]][0]) / 2.0;
             }
          }
       }
    else
       {
      // assert(sectionSize[0] == 0);
         assert(core->coreArrayNeighborhoodSizes_1D[1][0] == 0);
#if 0
         printf ("--- sectionSize[0] == 0: This is the trival case \n");
#endif
       }
     }
   }
  else
   {
#if 0
     printf ("--- The local size for this arraySection is zero in either the Y or Z axis sectionSize[1] = %d sectionSize[2] = %d \n",sectionSize[1],sectionSize[2]);
#endif
   }
  }
 else
  {
 // This is array does not have an interior upon which to relax.
#if 0
    printf ("--- 1D array too small (still no interior) \n");
#endif
  }
 }
}
}
}
#endif

#if 0
array.display("after relaxation on memory section edges: array");
old_array.display("after relaxation on memory section edges: old_array");
#endif
}


// Apply one averaging relaxation step (stencil half-width "dist") to the cells
// of core "coreID"'s section of "array" that lie within "dist" of the section
// surface, reading values from "old_array"; where the stencil reaches outside
// this core's section, the values come from old_array's detached halo buffers.
// Cells at least "dist" away from every face are skipped here (handled by the
// interior relaxation pass).
// NOTE(review): only the 3D case (all three global extents > 2*dist) is handled
// in this simplified variant; smaller arrays fall through with no relaxation.
template <typename T>
void relax_on_detachedhalo_boundary_simplified( int coreID, MulticoreArray<T> & array, MulticoreArray<T> & old_array, int dist )
   {
  // Global array extents (whole distributed array, not this core's section).
     const int arraySizeX = array.get_arraySize(0);
     const int arraySizeY = array.get_arraySize(1);
     const int arraySizeZ = array.get_arraySize(2);

  // Alias kept so the body below matches the multi-core-loop version of this code.
     int p = coreID;

     Core<T>* core = array.coreArray[coreID];

  // This lifts out loop invariant portions of the code.
     T** arraySectionPointers = array.get_arraySectionPointers();
     T** old_arraySectionPointers = old_array.get_arraySectionPointers();

     assert(arraySectionPointers != NULL);
     assert(old_arraySectionPointers != NULL);

  // This core's own section of the new and old arrays.
     T* arraySection = array.get_arraySectionPointers()[p];
     T* old_arraySection = old_array.get_arraySectionPointers()[p];

#if 0
     printf ("\nIterate over all cores: p = %d arraySection = %p old_arraySection = %p \n",p,arraySection,old_arraySection);
#endif

     assert(arraySection != NULL);
     assert(old_arraySection != NULL);

  // ***************************************************************
  // Fixup internal boundaries of the memory allocated to each core.
  // ***************************************************************
#if 0
     printf ("Fixup boundaries: p = %d Array size (%d,%d,%d) sectionSize(%d,%d,%d) coreArray(%d,%d,%d) \n",p,arraySizeX,arraySizeY,arraySizeZ,sectionSize[0],sectionSize[1],sectionSize[2],array.get_coreArraySize(0),array.get_coreArraySize(1),array.get_coreArraySize(2));
#endif

     if (arraySizeZ > (2*dist))
        {
          if (arraySizeY > (2*dist) && arraySizeX > (2*dist))
             {
            // This is the case of 3D relaxation
#if 0
               printf ("This is the case of 3D relaxation \n");
            // Iterate on the interior of the section (non-shared memory operation, local to the closest local memory declared for each core).
               printf ("This needs to use sectionSize[0-2] to get the local size instead of the global size! \n");
#endif
            // Proceed only for a non-degenerate local section.
            // NOTE(review): coreArrayNeighborhoodSizes_3D[1][1][1] appears to be
            // the size of the center (this core's) section — confirm against the
            // MulticoreArray documentation.
               if ((core->coreArrayNeighborhoodSizes_3D[1][1][1][0] >= 1 || core->coreArrayNeighborhoodSizes_3D[1][1][1][1] >= 1) && core->coreArrayNeighborhoodSizes_3D[1][1][1][2] >= 1)
                  {
                 // Detached halo buffers of the old array: (axis, low/high face).
                    T** old_haloXBottom = old_array.get_haloSectionPointers(0,0);
                    T** old_haloXTop = old_array.get_haloSectionPointers(0,1);
                    T** old_haloYBottom = old_array.get_haloSectionPointers(1,0);
                    T** old_haloYTop = old_array.get_haloSectionPointers(1,1);
                    T** old_haloZBottom = old_array.get_haloSectionPointers(2,0);
                    T** old_haloZTop = old_array.get_haloSectionPointers(2,1);

                 // Iteration bounds: where this core lies on the physical boundary
                 // of the global array (boundaryCore_3D — presumed meaning, TODO
                 // confirm), the outermost "dist" cells are left untouched;
                 // interior cores sweep their full section.
                    int base_X = (core->boundaryCore_3D[0][0] == true) ? dist : 0;
                    int bound_X = (core->boundaryCore_3D[0][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][0] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][0];
                    int base_Y = (core->boundaryCore_3D[1][0] == true) ? dist : 0;
                    int bound_Y = (core->boundaryCore_3D[1][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][1] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][1];
                    int base_Z = (core->boundaryCore_3D[2][0] == true) ? dist : 0;
                    int bound_Z = (core->boundaryCore_3D[2][1] == true) ? core->coreArrayNeighborhoodSizes_3D[1][1][1][2] - (dist): core->coreArrayNeighborhoodSizes_3D[1][1][1][2];

                    for (int k = base_Z; k < bound_Z; k++)
                       {
                         for (int j = base_Y; j < bound_Y; j++)
                            {
                              for (int i = base_X; i < bound_X; i++)
                                 {
                                // Skip cells deeper than "dist" from every face of the
                                // section: those belong to the interior pass, not here.
                                   if((i >= dist) && (i < core->coreArrayNeighborhoodSizes_3D[1][1][1][0]-dist) && (j >= dist) && (j < core->coreArrayNeighborhoodSizes_3D[1][1][1][1]-dist) && (k >= dist) && (k < core->coreArrayNeighborhoodSizes_3D[1][1][1][2]-dist)) continue;

                                   T tmp(0.0);

                                // Sum the 6 axis-aligned neighbors at each distance
                                // d = 1..dist; an index that falls outside this core's
                                // section reads the matching face's halo buffer instead.
                                   for(int d=1; d <=dist; d++)
                                      {
                                        tmp += ( /* array[Z][Y][X-d] */ ((i-d < 0) ? old_haloXBottom[coreID][otherCore_index3D((old_array.get_haloWidth(0)+(i-d)),j,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][0][1])] : old_arraySection[index3D(i-d,j,k)]) +
                                                 /* array[Z][Y][X+d] */ ((i+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][0]) ? old_haloXTop[coreID][otherCore_index3D((i+d-core->coreArrayNeighborhoodSizes_3D[1][1][2][0]),j,k,old_array.get_haloWidth(0),core->coreArrayNeighborhoodSizes_3D[1][1][2][1])] : old_arraySection[index3D(i+d,j,k)]) +
                                                 /* array[Z][Y-d][X] */ ((j-d < 0) ? old_haloYBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(old_array.get_haloWidth(1)+(j-d)),k,(core->coreArrayNeighborhoodSizes_3D[1][0][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,j-d,k)]) +
                                                 /* array[Z][Y+d][X] */ ((j+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][1]) ? old_haloYTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+d-core->coreArrayNeighborhoodSizes_3D[1][2][1][2]),k,(core->coreArrayNeighborhoodSizes_3D[1][2][1][0]+2*old_array.get_haloWidth(0)),old_array.get_haloWidth(1))] : old_arraySection[index3D(i,j+d,k)]) +
                                                 /* array[Z-d][Y][X] */ ((k-d < 0) ? old_haloZBottom[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+old_array.get_haloWidth(1)),(old_array.get_haloWidth(2)+(k-d)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[0][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,j,k-d)]) +
                                                 /* array[Z+d][Y][X] */ ((k+d >= core->coreArrayNeighborhoodSizes_3D[1][1][1][2]) ? old_haloZTop[coreID][otherCore_index3D((i+old_array.get_haloWidth(0)),(j+old_array.get_haloWidth(1)),(k+d-core->coreArrayNeighborhoodSizes_3D[2][1][1][2]),(core->coreArrayNeighborhoodSizes_3D[2][1][1][0]+2*old_array.get_haloWidth(0)),(core->coreArrayNeighborhoodSizes_3D[2][1][1][1]+2*old_array.get_haloWidth(1)))] : old_arraySection[index3D(i,j,k+d)]) );
                                      }

                                // Average of the 6*dist contributions gathered above
                                // (6 neighbors per distance, distances 1..dist).
                                   arraySection[index3D(i,j,k)] = tmp / (6.0*dist);
                                 }
                            }
                       }
                  }
             }
        }
   }
declare_variant_messages.c
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s

// Clang diagnostics test for '#pragma omp declare variant'.  This file is
// consumed by clang's -verify mode: every "expected-error" / "expected-warning"
// / "expected-note" comment below is a test assertion tied to its line, so the
// directive text and its placement must not be changed.

#pragma omp declare // expected-error {{expected an OpenMP directive}}

int foo(void);

#pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}}
#pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}}
#pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}}
#pragma omp declare variant(foo) match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv, vvv}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(implementation={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'implementation'; selector ignored}} expected-note {{context selector options are: 'vendor' 'extension' 'unified_address' 'unified_shared_memory' 'reverse_offload' 'dynamic_allocators' 'atomic_default_mem_order'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor}) // expected-warning {{the context selector 'vendor' in context set 'implementation' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}}
#pragma omp declare variant(foo) match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}}
#pragma omp declare variant(foo) match(implementation={vendor(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-warning {{the context selector 'vendor' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'vendor' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{the context selector 'kind' is not valid for the context set 'implementation'; selector ignored}} expected-note {{the context selector 'kind' can be nested in the context set 'device'; try 'match(device={kind(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'device'; selector ignored}} expected-note {{context selector options are: 'kind' 'isa' 'arch'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind}) // expected-warning {{the context selector 'kind' in context set 'device' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}}
#pragma omp declare variant(foo) match(device={kind(score cpu)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}}
#pragma omp declare variant(foo) match(device={kind(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(2 gpu)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('2'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{'ibm' is not a valid context property for the context selector 'kind' and the context set 'device'; property ignored}} expected-note {{try 'match(implementation={vendor(ibm)})'}} expected-note {{the ignored property spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): host), kind(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'kind' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'kind' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'vendor' is not valid for the context set 'device'; selector ignored}} expected-note {{the context selector 'vendor' can be nested in the context set 'implementation'; try 'match(implementation={vendor(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={extension("aaa")}) // expected-warning {{'aaa' is not a valid context property for the context selector 'extension' and the context set 'implementation'; property ignored}} expected-note {{context property options are: 'match_all' 'match_any' 'match_none'}} expected-note {{the ignored property spans until here}}
int bar(void);

#pragma omp declare variant(foo) match(implementation = {vendor(score(foo) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo()) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(<expr>) :llvm)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}}
#pragma omp declare variant(foo) match(user = {condition(foo)}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo is not}}
#pragma omp declare variant(foo) match(user = {condition(foo())}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo() is not}}
#pragma omp declare variant(foo) match(user = {condition(<expr>)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-note {{the ignored selector spans until here}}
int score_and_cond_non_const();

#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int a; // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp threadprivate(a) // expected-error {{'#pragma omp declare variant' can only be applied to functions}}
int var;
#pragma omp threadprivate(var)
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare // expected-error {{expected an OpenMP directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma options align=packed
int main();
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma init_seg(compiler)
int main();
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{single declaration is expected after 'declare variant' directive}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int b, c;
int no_proto();
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int no_proto_too();
int proto1(int);
#pragma omp declare variant(proto1) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto(); // expected-note {{previous declaration is here}}
int diff_proto(double); // expected-error {{conflicting types for 'diff_proto'}}
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto1(double);
int after_use_variant(void);
int after_use();
int bar() { return after_use(); }
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied for function after first usage; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int after_use(void);
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined(void) { return 0; }
int defined1(void) { return 0; }
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied to the function that was defined already; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined1(void);
int diff_cc_variant(void);
#pragma omp declare variant(diff_cc_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'int (void) __attribute__((vectorcall))'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
__vectorcall int diff_cc(void);
int diff_ret_variant(void);
#pragma omp declare variant(diff_ret_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'void (void)'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void diff_ret(void);

void marked(void);
void not_marked(void);
#pragma omp declare variant(not_marked) match(implementation={vendor(unknown)}, device={kind(cpu)}) // expected-note {{marked as 'declare variant' here}}
void marked_variant(void);
#pragma omp declare variant(marked_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
void marked(void);

#pragma omp declare variant(foo) match(device = {isa("foo")})
int unknown_isa_trait(void);
#pragma omp declare variant(foo) match(device = {isa(foo)})
int unknown_isa_trait2(void);
#pragma omp declare variant(foo) match(device = {kind(fpga), isa(bar)})
int ignored_isa_trait(void);
void caller() {
  unknown_isa_trait(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
  unknown_isa_trait2(); // expected-warning {{isa trait 'foo' is not known to the current target; verify the spelling or consider restricting the context selector with the 'arch' selector further}}
  ignored_isa_trait();
}

#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}

// FIXME: If the scores are equivalent we should detect that and allow it.
#pragma omp begin declare variant match(implementation = {vendor(score(2) \
                                                                 : llvm)})
#pragma omp declare variant(foo) match(implementation = {vendor(score(2) \
                                                                : llvm)}) // expected-error@-1 {{nested OpenMP context selector contains duplicated trait 'llvm' in selector 'vendor' and set 'implementation' with different score}}
int conflicting_nested_score(void);
#pragma omp end declare variant

// FIXME: We should build the conjuction of different conditions, see also the score fixme above.
#pragma omp begin declare variant match(user = {condition(1)})
#pragma omp declare variant(foo) match(user = {condition(1)}) // expected-error {{nested user conditions in OpenMP context selector not supported (yet)}}
int conflicting_nested_condition(void);
#pragma omp end declare variant
GB_unaryop__lnot_int8_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any change belongs in the code generator that produced this
// file, not here; the GB_* macros below are a contract consumed by the
// included template GB_unaryop_transpose.c.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_int8_fp32
// op(A') function: GB_tran__lnot_int8_fp32

// C type:   int8_t
// A type:   float
// cast:     int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    int8_t z ; GB_CAST_SIGNED(z,aij,8) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (z, aij) ;                   \
    GB_OP (GB_CX (pC), z) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int8_fp32
(
    int8_t *Cx,       // Cx and Ax may be aliased
    float *Ax,
    int64_t anz,      // number of entries to process
    int nthreads      // number of OpenMP threads for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Element-wise: each iteration reads Ax [p] before writing Cx [p]
    // (see GB_CAST_OP), so the loop is safe for in-place use and is
    // embarrassingly parallel.
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is provided by the shared template, which
    // expands using the GB_* macros defined above.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_uint16_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// (review note) Generated kernel: Cx [p] = !(Ax [p] != 0), bool input cast to
// uint16_t output.  Do not hand-edit; regenerate from the template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint16_bool
// op(A') function:  GB_tran__lnot_uint16_bool

// C type:   uint16_t
// A type:   bool
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (x, aij) ;                   \
    GB_OP (GB_CX (pC), x) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint16_bool
(
    uint16_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads        // number of OpenMP threads for the element-wise loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each iteration touches only Cx [p] and Ax [p]
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint16_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose body is shared by all type combinations; the macros above
    // specialize the included template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__isfinite_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// (review note) Generated kernel: Cx [p] = isfinite (Ax [p]), double input to
// bool output, with a separate path for bitmap-format inputs (Ab != NULL).
// Do not hand-edit; regenerate from the template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__isfinite_bool_fp64
// op(A') function:  GB_unop_tran__isfinite_bool_fp64

// C type:   bool
// A type:   double
// cast:     double cij = (aij)
// unaryop:  cij = isfinite (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = isfinite (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    double aij = Ax [pA] ;                  \
    /* Cx [pC] = op (cast (aij)) */         \
    double z = (aij) ;                      \
    Cx [pC] = isfinite (z) ;                \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__isfinite_bool_fp64
(
    bool *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            // identity op with no typecast reduces to a memcpy (not this op)
            GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                double aij = Ax [p] ;
                double z = (aij) ;
                Cx [p] = isfinite (z) ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries that are not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__isfinite_bool_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose body is shared by all type combinations; the macros above
    // specialize the included template
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
test.c
#include <stdio.h>
#include <omp.h>

#pragma omp requires unified_shared_memory

#include "../utilities/check.h"
#include "../utilities/utilities.h"

// OpenMP offloading regression test: exercises "#pragma omp barrier" inside
// serialized, regular, consecutive, and (disabled) nested parallel regions on
// the target device.  The TESTD/VERIFY/DUMP_SUCCESS harness macros come from
// ../utilities/utilities.h.

#define TRIALS (1)

#define N (992)

// Initialize fixtures: C zeroed, D[i] = i, E[i] = -i (E is currently unused
// by the active tests).
#define INIT() INIT_LOOP(N, {C[i] = 0; D[i] = i; E[i] = -i;})

#define ZERO(X) ZERO_ARRAY(N, X)

// Serialized parallel region (if(0) forces 1 thread despite num_threads(33)):
// barriers must still be correct when the region runs on a single thread.
#define PARALLEL_A() { \
  _Pragma("omp parallel num_threads(33) if (0)") \
  { \
    int tid = omp_get_thread_num(); \
    int cs = N / omp_get_num_threads(); \
    int lb = tid * cs; \
    int ub = (tid+1)*cs; \
    ub = ub > N ? N : ub; \
    for (int i = lb; i < ub; i++) { \
      A[i] = D[i]; \
    } \
    _Pragma("omp barrier") \
    double sum = 0; \
    for (int i = 1+tid; i < N; i++) { \
      sum += A[i]; \
    } \
    _Pragma("omp barrier") \
    A[tid] = sum; \
    sum = 0; \
    for (int i = 2+tid; i < N; i++) { \
      sum += A[i]; \
    } \
    _Pragma("omp barrier") \
    A[tid+1] = sum; \
    _Pragma("omp barrier") \
    B[tid] = A[tid]-A[tid+1]; \
  } \
}

// Core per-thread body: fill a chunk of A from D, reduce, and combine via
// barriers; each barrier separates a write phase from the reads that depend
// on it.
#define BODY_B() { \
  int tid = omp_get_thread_num(); \
  int cs = N / omp_get_num_threads(); \
  int lb = tid * cs; \
  int ub = (tid+1)*cs; \
  ub = ub > N ? N : ub; \
  for (int i = lb; i < ub; i++) { \
    A[i] = D[i]; \
  } \
  _Pragma("omp barrier") \
  double sum = 0; \
  for (int i = 1+tid; i < N; i++) { \
    sum += A[i]; \
  } \
  _Pragma("omp barrier") \
  A[tid] = sum; \
  _Pragma("omp barrier") \
  C[tid] = A[tid]-A[tid+1]-tid; \
  if (tid < omp_get_num_threads()-1) B[tid] += C[tid]; \
}

// BODY_B run in a parallel region whose width is threads[0] (captured array
// so the macro works inside the TESTD target regions).
#define PARALLEL_B() { \
  _Pragma("omp parallel num_threads(threads[0])") \
  { \
    BODY_B(); \
  } \
}

// Five consecutive parallel regions; B accumulates +1 per region per thread.
#define PARALLEL_B5() { PARALLEL_B() PARALLEL_B() PARALLEL_B() PARALLEL_B() PARALLEL_B() }

// Nested parallelism variant (16 outer x 16 inner threads); currently
// compiled out below (#if 0).
#define BODY_NP() { \
  _Pragma("omp parallel num_threads(16)") { \
    int b = omp_get_thread_num()*16; \
    _Pragma("omp parallel num_threads(16)") { \
      int tid = omp_get_thread_num(); \
      int cs = N / omp_get_num_threads(); \
      int lb = tid * cs; \
      int ub = (tid+1)*cs; \
      ub = ub > N ? N : ub; \
      for (int i = lb; i < ub; i++) { \
        A[i] = D[i]; \
      } \
      _Pragma("omp barrier") \
      double sum = 0; \
      for (int i = 1+tid; i < N; i++) { \
        sum += A[i]; \
      } \
      _Pragma("omp barrier") \
      A[tid] = sum; \
      _Pragma("omp barrier") \
      C[tid] = A[tid]-A[tid+1]-tid; \
      if (tid < omp_get_num_threads()-1) B[b+tid] += C[tid]; else B[b+tid]=0; \
    } \
  } \
}

int main(void) {
  check_offloading();

  double A[N+2], B[N+2], C[N+2], D[N+2], E[N+2];

  INIT();

  // Detect whether the target region actually offloaded or fell back to the
  // host, and size the thread sweeps accordingly.
  long cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int gpu_threads = 768;
  int cpu_threads = 32;
  int max_threads = cpuExec ? cpu_threads : gpu_threads;

  //
  // Test: Barrier in a serialized parallel region.
  //
  TESTD("omp target teams num_teams(1) thread_limit(32)", {
    PARALLEL_A()
  }, VERIFY(0, 1, B[i], i+1));

  //
  // Test: Barrier in a parallel region.
  //
  for (int t = 1; t <= max_threads; t += (t < 32) ? 31 : 32) {
    int threads[1]; threads[0] = t;
    TESTD("omp target teams num_teams(1) thread_limit(max_threads)", {
      ZERO(B);
      PARALLEL_B5()
    }, VERIFY(0, threads[0]-1, B[i], 5));
  }
  // keep the expected success count constant across host/device runs
  DUMP_SUCCESS(gpu_threads-max_threads);

  //
  // Test: Barrier in consecutive parallel regions with variable # of threads.
  //
  TESTD("omp target teams num_teams(1) thread_limit(max_threads)", {
    ZERO(B);
    for (int t = 32; t <= max_threads; t += 32) {
      int threads[1]; threads[0] = t;
      PARALLEL_B()
    }
  }, VERIFY(0, max_threads-1, B[i], (max_threads / 32) - (i+1) / 32));

  //
  // Test: Single thread in target region.
  //
  TESTD("#pragma omp target", {
    ZERO(B);
    BODY_B()
  }, VERIFY(0, 1, C[i], 491535));

  //
  // Test: Barrier in target parallel.
  //
  for (int t = 1; t <= max_threads; t += (t < 32) ? 31 : 32) {
    ZERO(B);
    int threads; threads = t;
    TESTD("omp target parallel num_threads(threads)", {
      BODY_B();
    }, VERIFY(0, t-1, B[i], (trial+1)*1.0));
  }
  DUMP_SUCCESS(gpu_threads-max_threads);

#if 0
  //
  // Test: Barrier in nested parallel in target region.
  //
  if (!cpuExec) {
    ZERO(B);
    TEST({
      BODY_NP();
    }, VERIFY(0, 16*16, B[i], (i > 0 && (i+1) % 16 == 0 ? 0 : (trial+1)*1)) );
  } else {
    DUMP_SUCCESS(1);
  }
#endif
  DUMP_SUCCESS(1);

  // target parallel + parallel
  // target + simd
  // target/teams/parallel with varying numbers of threads
}
ordered-2.c
/* { dg-do compile } */ void f1(void) { #pragma omp ordered asdf /* { dg-error "expected" } */ #pragma omp ordered /* { dg-error "region may not be closely nested inside of" } */ } /* { dg-error "expected expression" } */
raytracer.h
#pragma once #include "resource.h" #include <linalg.h> #include <memory> #include <omp.h> #include <random> #include <stdio.h> #include <time.h> using namespace linalg::aliases; namespace cg::renderer { struct ray { ray(float3 position, float3 direction) : position(position) { this->direction = normalize(direction); } float3 position; float3 direction; }; struct payload { float t; float3 bary; cg::color color; size_t depth; }; template<typename VB> struct triangle { triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c); float3 a; float3 b; float3 c; float3 ba; float3 ca; float3 na; float3 nb; float3 nc; float3 ambient; float3 diffuse; float3 emissive; }; template<typename VB> inline triangle<VB>::triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c) { a = float3{ vertex_a.x, vertex_a.y, vertex_a.z }; b = float3{ vertex_b.x, vertex_b.y, vertex_b.z }; c = float3{ vertex_c.x, vertex_c.y, vertex_c.z }; ba = b - a; ca = c - a; na = float3{ vertex_a.nx, vertex_a.ny, vertex_a.nz }; nb = float3{ vertex_b.nx, vertex_b.ny, vertex_b.nz }; nc = float3{ vertex_c.nx, vertex_c.ny, vertex_c.nz }; ambient = { vertex_a.ambient_r, vertex_a.ambient_g, vertex_a.ambient_b, }; diffuse = { vertex_a.diffuse_r, vertex_a.diffuse_g, vertex_a.diffuse_b, }; emissive = { vertex_a.emissive_r, vertex_a.emissive_g, vertex_a.emissive_b, }; } template<typename VB> class aabb { public: void add_triangle(const triangle<VB> triangle); const std::vector<triangle<VB>>& get_traingles() const; bool aabb_test(const ray& ray) const; protected: std::vector<triangle<VB>> triangles; float3 aabb_min; float3 aabb_max; }; struct light { float3 position; float3 color; }; template<typename VB, typename RT> class raytracer { public: raytracer(){}; ~raytracer(){}; void set_render_target(std::shared_ptr<resource<RT>> in_render_target); void clear_render_target(const RT& in_clear_value); void set_viewport(size_t in_width, size_t in_height); void 
set_per_shape_vertex_buffer(std::vector<std::shared_ptr<cg::resource<VB>>> in_per_shape_vertex_buffer); void build_acceleration_structure(); std::vector<aabb<VB>> acceleration_structures; // std::vector<triangle<VB>> acceleration_structures; void ray_generation(float3 position, float3 direction, float3 right, float3 up); payload trace_ray(const ray& ray, size_t depth, float max_t = 1000.f, float min_t = 0.001f) const; payload intersection_shader(const triangle<VB>& triangle, const ray& ray) const; std::function<payload(const ray& ray)> miss_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> closest_hit_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> any_hit_shader = nullptr; float get_random(const int thread_num, float range = 0.1f) const; // You can alter this number to enable/disable super-sampling anti-aliasing. // Value of 1 means SSAA is disabled. int SSAA_factor = 16; // Max amount of times the same ray may be re-casted. 
int max_depth = 5; protected: std::shared_ptr<cg::resource<RT>> render_target; std::vector<std::shared_ptr<cg::resource<VB>>> per_shape_vertex_buffer; size_t width = 1920; size_t height = 1080; }; template<typename VB, typename RT> inline void raytracer<VB, RT>::set_render_target(std::shared_ptr<resource<RT>> in_render_target) { render_target = in_render_target; } template<typename VB, typename RT> inline void raytracer<VB, RT>::clear_render_target(const RT& in_clear_value) { for (size_t i = 0; i < render_target->get_number_of_elements(); i++) { render_target->item(i) = in_clear_value; } } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_per_shape_vertex_buffer(std::vector<std::shared_ptr<cg::resource<VB>>> in_per_shape_vertex_buffer) { per_shape_vertex_buffer = in_per_shape_vertex_buffer; } template<typename VB, typename RT> inline void raytracer<VB, RT>::build_acceleration_structure() { for (auto& vertex_buffer : per_shape_vertex_buffer) { size_t vertex_id = 0; aabb<VB> aabb; while (vertex_id < vertex_buffer->get_number_of_elements()) { triangle<VB> triangle(vertex_buffer->item(vertex_id++), vertex_buffer->item(vertex_id++), vertex_buffer->item(vertex_id++)); aabb.add_triangle(triangle); } acceleration_structures.push_back(aabb); } } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_viewport(size_t in_width, size_t in_height) { width = in_width; height = in_height; } template<typename VB, typename RT> inline void raytracer<VB, RT>::ray_generation(float3 position, float3 direction, float3 right, float3 up) { for (int x = 0; x < width; x++) { #pragma omp parallel for for (int y = 0; y < height; y++) { float3 res_color(0.f); for (int px = 0; px < SSAA_factor; px++) for (int py = 0; py < SSAA_factor; py++) { // float x_jitter = get_random(omp_get_thread_num() + clock()); // float y_jitter = get_random(omp_get_thread_num() + clock()); // Converting from [0; width - 1] to [-1, 1]: // [0; width - 1] -> [0; 1] -> [0; 2] -> [-1; 1] 
float u = 2.f * (x + px / static_cast<float>(SSAA_factor)) / static_cast<float>(width - 1) - 1.f; u *= static_cast<float>(width) / static_cast<float>(height); float v = 2.f * (y + py / static_cast<float>(SSAA_factor)) / static_cast<float>(height - 1) - 1.f; float3 ray_direction = direction + (u)*right - (v)*up; ray ray(position, ray_direction); payload payload = trace_ray(ray, max_depth); res_color += float3(payload.color.r, payload.color.g, payload.color.b); // cg::color accumed = // cg::color::from_float3(render_target->item(x, // y).to_float3()); cg::color result{ (accumed.r * 4.f + // payload.color.r) / 5.f, (accumed.g * 4.f + // payload.color.g) / 5.f, (accumed.b * 4.f + // payload.color.b) / 5.f, // }; } render_target->item(x, y) = RT::from_color(cg::color::from_float3(res_color / SSAA_factor / SSAA_factor)); } printf("\rProgress: %.2f%%", 100.f * x / width); } } template<typename VB, typename RT> inline payload raytracer<VB, RT>::trace_ray(const ray& ray, size_t depth, float max_t, float min_t) const { if (depth == 0) return miss_shader(ray); depth--; payload closest_hit_payload = {}; closest_hit_payload.t = max_t; const triangle<VB>* closest_triangle = nullptr; for (auto& aabb : acceleration_structures) { if (!aabb.aabb_test(ray)) continue; for (auto& triangle : aabb.get_traingles()) { payload payload = intersection_shader(triangle, ray); if (payload.t > min_t && payload.t < closest_hit_payload.t) { closest_hit_payload = payload; closest_triangle = &triangle; if (any_hit_shader) { return any_hit_shader(ray, payload, triangle); } } } } if (closest_hit_payload.t < max_t) { if (closest_hit_shader) { closest_hit_payload.depth = depth; return closest_hit_shader(ray, closest_hit_payload, *closest_triangle); } } return miss_shader(ray); } template<typename VB, typename RT> inline payload raytracer<VB, RT>::intersection_shader(const triangle<VB>& triangle, const ray& ray) const { payload payload{}; payload.t = -1.f; float3 pvec = cross(ray.direction, triangle.ca); 
float det = dot(triangle.ba, pvec); // No intersection; return empty payload if (det > -1e-8 && det < 1e-8) return payload; float inv_det = 1.f / det; float3 tvec = ray.position - triangle.a; float u = dot(tvec, pvec) * inv_det; if (u < 0.f || u > 1.f) return payload; float3 qvec = cross(tvec, triangle.ba); float v = dot(ray.direction, qvec) * inv_det; if (v < 0.f || (u + v) > 1.f) return payload; payload.t = dot(triangle.ca, qvec) * inv_det; payload.bary = float3{ 1.f - u - v, u, v }; return payload; } template<typename VB, typename RT> inline float raytracer<VB, RT>::get_random(const int thread_num, const float range) const { static std::default_random_engine generator(thread_num); static std::normal_distribution<float> distribution(0.f, range); return distribution(generator); } template<typename VB> inline void aabb<VB>::add_triangle(const triangle<VB> triangle) { if (triangles.empty()) aabb_max = aabb_min = triangle.a; triangles.push_back(triangle); aabb_max = max(triangle.a, aabb_max); aabb_max = max(triangle.b, aabb_max); aabb_max = max(triangle.c, aabb_max); aabb_min = min(triangle.a, aabb_min); aabb_min = min(triangle.b, aabb_min); aabb_min = min(triangle.c, aabb_min); } template<typename VB> inline const std::vector<triangle<VB>>& aabb<VB>::get_traingles() const { return triangles; } template<typename VB> inline bool aabb<VB>::aabb_test(const ray& ray) const { float3 inv_ray_direction = float3(1.f) / ray.direction; float3 t0 = (aabb_max - ray.position) * inv_ray_direction; float3 t1 = (aabb_min - ray.position) * inv_ray_direction; float3 tmin = min(t0, t1); float3 tmax = max(t0, t1); return maxelem(tmin) <= minelem(tmax); } } // namespace cg::renderer
constitute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE % % C O O NN N SS T I T U U T E % % C O O N N N ESSS T I T U U T EEE % % C O O N NN SS T I T U U T E % % CCCC OOO N N SSSSS T IIIII T UUU T EEEEE % % % % % % MagickCore Methods to Consitute an Image % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/client.h" #include "MagickCore/coder-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/constitute-private.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/identify.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "ios_error.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n s t i t u t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConstituteImage() returns an image from the pixel data you supply. % The pixel data must be in scanline order top-to-bottom. The data can be % char, short int, int, float, or double. Float and double require the % pixels to be normalized [0..1], otherwise [0..QuantumRange]. 
For example, to
%  create a 640x480 image from unsigned red-green-blue character data, use:
%
%      image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
%  The format of the ConstituteImage method is:
%
%      Image *ConstituteImage(const size_t columns,const size_t rows,
%        const char *map,const StorageType storage,const void *pixels,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o columns: width in pixels of the image.
%
%    o rows: height in pixels of the image.
%
%    o map: This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: Define the data type of the pixels.  Float and double types are
%      expected to be normalized [0..1] otherwise [0..QuantumRange].  Choose
%      from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
%      LongPixel, QuantumPixel, or ShortPixel.
%
%    o pixels: This array of values contain the pixel components as defined by
%      map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    length;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Derive the image depth from the storage element size.  NOTE(review):
    IntegerPixel and QuantumPixel fall through to default and leave the
    depth at its AcquireImage() value -- confirm this is intentional.
  */
  switch (storage)
  {
    case CharPixel: image->depth=8*sizeof(unsigned char); break;
    case DoublePixel: image->depth=8*sizeof(double); break;
    case FloatPixel: image->depth=8*sizeof(float); break;
    case LongPixel: image->depth=8*sizeof(unsigned long); break;
    case LongLongPixel: image->depth=8*sizeof(MagickSizeType); break;
    case ShortPixel: image->depth=8*sizeof(unsigned short); break;
    default: break;
  }
  /*
    Infer alpha trait and colorspace from the channel letters in the map;
    a single-letter map of any other channel is treated as grayscale.
  */
  length=strlen(map);
  for (i=0; i < (ssize_t) length; i++)
  {
    switch (map[i])
    {
      case 'a':
      case 'A':
      case 'O':
      case 'o':
      {
        image->alpha_trait=BlendPixelTrait;
        break;
      }
      case 'C':
      case 'c':
      case 'm':
      case 'M':
      case 'Y':
      case 'y':
      case 'K':
      case 'k':
      {
        image->colorspace=CMYKColorspace;
        break;
      }
      case 'I':
      case 'i':
      {
        image->colorspace=GRAYColorspace;
        break;
      }
      default:
      {
        if (length == 1)
          image->colorspace=GRAYColorspace;
        break;
      }
    }
  }
  status=SetImageExtent(image,columns,rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Copy the caller's pixel array into the image; on failure the partially
    constructed image is destroyed and NULL is returned.
  */
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P i n g I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PingImage() returns all the properties of an image or image sequence
%  except for the pixels.  It is much faster and consumes far less memory
%  than ReadImage().  On failure, a NULL image is returned and exception
%  describes the reason for the failure.
% % The format of the PingImage method is: % % Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Ping the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static size_t PingStream(const Image *magick_unused(image), const void *magick_unused(pixels),const size_t columns) { magick_unreferenced(image); magick_unreferenced(pixels); return(columns); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport Image *PingImage(const ImageInfo *image_info, ExceptionInfo *exception) { Image *image; ImageInfo *ping_info; assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); ping_info=CloneImageInfo(image_info); ping_info->ping=MagickTrue; image=ReadStream(ping_info,&PingStream,exception); if (image != (Image *) NULL) { ResetTimer(&image->timer); if (ping_info->verbose != MagickFalse) (void) IdentifyImage(image,thread_stdout,MagickFalse,exception); } ping_info=DestroyImageInfo(ping_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P i n g I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PingImages() pings one or more images and returns them as an image list. % % The format of the PingImage method is: % % Image *PingImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *PingImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char ping_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Ping image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); (void) SetImageOption(image_info,"filename",filename); (void) CopyMagickString(image_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename, (int) image_info->scene,ping_filename,exception); if (LocaleCompare(ping_filename,image_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. */ read_info=CloneImageInfo(image_info); sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes == 0) { read_info=DestroyImageInfo(read_info); return(PingImage(image_info,exception)); } (void) CopyMagickString(ping_filename,read_info->filename, MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename, (int) scene,read_info->filename,exception); image=PingImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } return(PingImage(image_info,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImage() reads an image or image sequence from a file or file 
handle. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the ReadImage method is: % % Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Read the image defined by the file or filename members of % this structure. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType IsCoderAuthorized(const char *coder, const PolicyRights rights,ExceptionInfo *exception) { if (IsRightsAuthorized(CoderPolicyDomain,rights,coder) == MagickFalse) { errno=EPERM; (void) ThrowMagickException(exception,GetMagickModule(),PolicyError, "NotAuthorized","`%s'",coder); return(MagickFalse); } return(MagickTrue); } MagickExport Image *ReadImage(const ImageInfo *image_info, ExceptionInfo *exception) { char filename[MagickPathExtent], magick[MagickPathExtent], magick_filename[MagickPathExtent]; const char *value; const DelegateInfo *delegate_info; const MagickInfo *magick_info; DecodeImageHandler *decoder; ExceptionInfo *sans_exception; GeometryInfo geometry_info; Image *image, *next; ImageInfo *read_info; MagickBooleanType status; MagickStatusType flags; /* Determine image type from filename prefix or suffix (e.g. image.jpg). 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image_info->filename != (char *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent); (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(filename,read_info->filename,MagickPathExtent); (void) CopyMagickString(magick,read_info->magick,MagickPathExtent); /* Call appropriate image reader based on image type. */ sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(read_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(read_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) read_info->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian; } } if ((magick_info != (const MagickInfo *) NULL) && (GetMagickDecoderSeekableStream(magick_info) != MagickFalse)) { image=AcquireImage(read_info,exception); (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } if (IsBlobSeekable(image) == MagickFalse) { /* Coder requires a seekable stream. 
*/ *read_info->filename='\0'; status=ImageToFile(image,read_info->filename,exception); if (status == MagickFalse) { (void) CloseBlob(image); read_info=DestroyImageInfo(read_info); image=DestroyImage(image); return((Image *) NULL); } read_info->temporary=MagickTrue; } (void) CloseBlob(image); image=DestroyImage(image); } image=NewImageList(); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) SetImageInfo(read_info,0,exception); (void) CopyMagickString(read_info->filename,filename, MagickPathExtent); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); } } if (decoder != (DecodeImageHandler *) NULL) { /* Call appropriate image reader based on image type. */ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=decoder(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); if (read_info->temporary != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Let our decoding delegate process the image. 
*/ image=AcquireImage(read_info,exception); if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return((Image *) NULL); } (void) CopyMagickString(image->filename,read_info->filename, MagickPathExtent); *read_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(read_info,image,read_info->magick,(char *) NULL, exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); image=DestroyImageList(image); read_info->temporary=MagickTrue; if (status != MagickFalse) (void) SetImageInfo(read_info,0,exception); magick_info=GetMagickInfo(read_info->magick,exception); decoder=GetImageDecoder(magick_info); if (decoder == (DecodeImageHandler *) NULL) { if (IsPathAccessible(read_info->filename) != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'", read_info->magick); else ThrowFileException(exception,FileOpenError,"UnableToOpenFile", read_info->filename); read_info=DestroyImageInfo(read_info); return((Image *) NULL); } /* Call appropriate image reader based on image type. 
*/ if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(read_info->magick,ReadPolicyRights,exception); image=(Image *) NULL; if (status != MagickFalse) image=(decoder)(read_info,exception); if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } if (read_info->temporary != MagickFalse) { (void) RelinquishUniqueFileResource(read_info->filename); read_info->temporary=MagickFalse; if (image != (Image *) NULL) (void) CopyMagickString(image->filename,filename,MagickPathExtent); } if (image == (Image *) NULL) { read_info=DestroyImageInfo(read_info); return(image); } if (exception->severity >= ErrorException) (void) LogMagickEvent(ExceptionEvent,GetMagickModule(), "Coder (%s) generated an image despite an error (%d), " "notify the developers",image->magick,exception->severity); if (IsBlobTemporary(image) != MagickFalse) (void) RelinquishUniqueFileResource(read_info->filename); if ((IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse) && (GetImageListLength(image) != 1)) { Image *clones; clones=CloneImages(image,read_info->scenes,exception); if (clones != (Image *) NULL) { image=DestroyImageList(image); image=GetFirstImageInList(clones); } } for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { char magick_path[MagickPathExtent], *property, timestamp[MagickPathExtent]; const char *option; const StringInfo *profile; ssize_t option_type; static const char *source_date_epoch = (const char *) NULL; static MagickBooleanType epoch_initalized = MagickFalse; next->taint=MagickFalse; GetPathComponent(magick_filename,MagickPath,magick_path); if (*magick_path == '\0' && *next->magick == '\0') (void) CopyMagickString(next->magick,magick,MagickPathExtent); (void) CopyMagickString(next->magick_filename,magick_filename, MagickPathExtent); if (IsBlobTemporary(image) != MagickFalse) (void) 
CopyMagickString(next->filename,filename,MagickPathExtent); if (next->magick_columns == 0) next->magick_columns=next->columns; if (next->magick_rows == 0) next->magick_rows=next->rows; (void) GetImageProperty(next,"exif:*",exception); (void) GetImageProperty(next,"icc:*",exception); (void) GetImageProperty(next,"iptc:*",exception); (void) GetImageProperty(next,"xmp:*",exception); value=GetImageProperty(next,"exif:Orientation",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:Orientation",exception); if (value != (char *) NULL) { next->orientation=(OrientationType) StringToLong(value); (void) DeleteImageProperty(next,"tiff:Orientation"); (void) DeleteImageProperty(next,"exif:Orientation"); } value=GetImageProperty(next,"exif:XResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.x; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.x=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.x=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:XResolution"); } value=GetImageProperty(next,"exif:YResolution",exception); if (value != (char *) NULL) { geometry_info.rho=next->resolution.y; geometry_info.sigma=1.0; flags=ParseGeometry(value,&geometry_info); if (geometry_info.sigma != 0) next->resolution.y=geometry_info.rho/geometry_info.sigma; if (strchr(value,',') != (char *) NULL) next->resolution.y=geometry_info.rho+geometry_info.sigma/1000.0; (void) DeleteImageProperty(next,"exif:YResolution"); } value=GetImageProperty(next,"exif:ResolutionUnit",exception); if (value == (char *) NULL) value=GetImageProperty(next,"tiff:ResolutionUnit",exception); if (value != (char *) NULL) { option_type=ParseCommandOption(MagickResolutionOptions,MagickFalse, value); if (option_type >= 0) next->units=(ResolutionType) option_type; (void) DeleteImageProperty(next,"exif:ResolutionUnit"); (void) 
DeleteImageProperty(next,"tiff:ResolutionUnit"); } if (next->page.width == 0) next->page.width=next->columns; if (next->page.height == 0) next->page.height=next->rows; option=GetImageOption(read_info,"caption"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"caption",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"comment"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"comment",property,exception); property=DestroyString(property); } option=GetImageOption(read_info,"label"); if (option != (const char *) NULL) { property=InterpretImageProperties(read_info,next,option,exception); (void) SetImageProperty(next,"label",property,exception); property=DestroyString(property); } if (LocaleCompare(next->magick,"TEXT") == 0) (void) ParseAbsoluteGeometry("0x0+0+0",&next->page); if ((read_info->extract != (char *) NULL) && (read_info->stream == (StreamHandler) NULL)) { RectangleInfo geometry; SetGeometry(next,&geometry); flags=ParseAbsoluteGeometry(read_info->extract,&geometry); if ((next->columns != geometry.width) || (next->rows != geometry.height)) { if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { Image *crop_image; crop_image=CropImage(next,&geometry,exception); if (crop_image != (Image *) NULL) ReplaceImageInList(&next,crop_image); } else if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0)) { Image *size_image; flags=ParseRegionGeometry(next,read_info->extract,&geometry, exception); size_image=ResizeImage(next,geometry.width,geometry.height, next->filter,exception); if (size_image != (Image *) NULL) ReplaceImageInList(&next,size_image); } } } profile=GetImageProfile(next,"icc"); if (profile == (const StringInfo *) NULL) profile=GetImageProfile(next,"icm"); profile=GetImageProfile(next,"iptc"); if (profile == (const StringInfo *) NULL) 
profile=GetImageProfile(next,"8bim"); if (epoch_initalized == MagickFalse) { source_date_epoch=getenv("SOURCE_DATE_EPOCH"); epoch_initalized=MagickTrue; } if (source_date_epoch == (const char *) NULL) { (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_mtime, MagickPathExtent,timestamp); (void) SetImageProperty(next,"date:modify",timestamp,exception); (void) FormatMagickTime((time_t) GetBlobProperties(next)->st_ctime, MagickPathExtent,timestamp); (void) SetImageProperty(next,"date:create",timestamp,exception); } option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (next->delay > (size_t) floor(geometry_info.rho+0.5)) next->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (next->delay < (size_t) floor(geometry_info.rho+0.5)) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else next->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) { option_type=ParseCommandOption(MagickDisposeOptions,MagickFalse, option); if (option_type >= 0) next->dispose=(DisposeType) option_type; } if (read_info->verbose != MagickFalse) (void) IdentifyImage(next,thread_stderr,MagickFalse,exception); image=next; } read_info=DestroyImageInfo(read_info); if (GetBlobError(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadImages() reads one or more images and returns them as an image list. 
% % The format of the ReadImage method is: % % Image *ReadImages(ImageInfo *image_info,const char *filename, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char read_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Read image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); read_info=CloneImageInfo(image_info); *read_info->magick='\0'; (void) SetImageOption(read_info,"filename",filename); (void) CopyMagickString(read_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(read_info,(Image *) NULL,filename, (int) read_info->scene,read_filename,exception); if (LocaleCompare(read_filename,read_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. 
*/ sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes != 0) { (void) CopyMagickString(read_filename,read_info->filename, MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); scene=(ssize_t) read_info->scene; for ( ; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL, read_filename,(int) scene,read_info->filename,exception); image=ReadImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } } (void) CopyMagickString(read_info->filename,filename,MagickPathExtent); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d I n l i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadInlineImage() reads a Base64-encoded inline image or image sequence. % The method returns a NULL if there is a memory shortage or if the image % cannot be read. On failure, a NULL image is returned and exception % describes the reason for the failure. % % The format of the ReadInlineImage method is: % % Image *ReadInlineImage(const ImageInfo *image_info,const char *content, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o content: the image encoded in Base64. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReadInlineImage(const ImageInfo *image_info, const char *content,ExceptionInfo *exception) { Image *image; ImageInfo *read_info; unsigned char *blob; size_t length; register const char *p; /* Skip over header (e.g. data:image/gif;base64,). 
*/ image=NewImageList(); for (p=content; (*p != ',') && (*p != '\0'); p++) ; if (*p == '\0') ThrowReaderException(CorruptImageError,"CorruptImage"); p++; length=0; blob=Base64Decode(p,&length); if (length == 0) { blob=(unsigned char *) RelinquishMagickMemory(blob); ThrowReaderException(CorruptImageError,"CorruptImage"); } read_info=CloneImageInfo(image_info); (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL, (void *) NULL); *read_info->filename='\0'; *read_info->magick='\0'; image=BlobToImage(read_info,blob,length,exception); blob=(unsigned char *) RelinquishMagickMemory(blob); read_info=DestroyImageInfo(read_info); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteImage() writes an image or an image sequence to a file or file handle. % If writing to a file is on disk, the name is defined by the filename member % of the image structure. WriteImage() returns MagickFalse is there is a % memory shortage or if the image cannot be written. Check the exception % member of image to determine the cause for any failure. % % The format of the WriteImage method is: % % MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { char filename[MagickPathExtent]; const char *option; const DelegateInfo *delegate_info; const MagickInfo *magick_info; EncodeImageHandler *encoder; ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType status, temporary; /* Determine image type from filename prefix or suffix (e.g. image.jpg). 
*/ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); sans_exception=AcquireExceptionInfo(); write_info=CloneImageInfo(image_info); (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) SetImageInfo(write_info,1,sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); /* Call appropriate image writer based on image type. */ magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if (magick_info != (const MagickInfo *) NULL) { if (GetMagickEndianSupport(magick_info) == MagickFalse) image->endian=UndefinedEndian; else if ((image_info->endian == UndefinedEndian) && (GetMagickRawSupport(magick_info) != MagickFalse)) { unsigned long lsb_first; lsb_first=1; image->endian=(*(char *) &lsb_first) == 1 ? 
LSBEndian : MSBEndian; } } (void) SyncImageProfiles(image); DisassociateImageStream(image); option=GetImageOption(image_info,"delegate:bimodal"); if ((IsStringTrue(option) != MagickFalse) && (write_info->page == (char *) NULL) && (GetPreviousImageInList(image) == (Image *) NULL) && (GetNextImageInList(image) == (Image *) NULL) && (IsTaintImage(image) == MagickFalse) ) { delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception); if ((delegate_info != (const DelegateInfo *) NULL) && (GetDelegateMode(delegate_info) == 0) && (IsPathAccessible(image->magick_filename) != MagickFalse)) { /* Process image with bi-modal delegate. */ (void) CopyMagickString(image->filename,image->magick_filename, MagickPathExtent); status=InvokeDelegate(write_info,image,image->magick, write_info->magick,exception); write_info=DestroyImageInfo(write_info); (void) CopyMagickString(image->filename,filename,MagickPathExtent); return(status); } } status=MagickFalse; temporary=MagickFalse; if ((magick_info != (const MagickInfo *) NULL) && (GetMagickEncoderSeekableStream(magick_info) != MagickFalse)) { char image_filename[MagickPathExtent]; (void) CopyMagickString(image_filename,image->filename,MagickPathExtent); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); (void) CopyMagickString(image->filename, image_filename,MagickPathExtent); if (status != MagickFalse) { if (IsBlobSeekable(image) == MagickFalse) { /* A seekable stream is required by the encoder. */ write_info->adjoin=MagickTrue; (void) CopyMagickString(write_info->filename,image->filename, MagickPathExtent); (void) AcquireUniqueFilename(image->filename); temporary=MagickTrue; } (void) CloseBlob(image); } } encoder=GetImageEncoder(magick_info); if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. 
*/ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights,exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } else { delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception); if (delegate_info != (DelegateInfo *) NULL) { /* Process the image with delegate. */ *write_info->filename='\0'; if (GetDelegateThreadSupport(delegate_info) == MagickFalse) LockSemaphoreInfo(delegate_info->semaphore); status=InvokeDelegate(write_info,image,(char *) NULL, write_info->magick,exception); if (GetDelegateThreadSupport(delegate_info) == MagickFalse) UnlockSemaphoreInfo(delegate_info->semaphore); (void) CopyMagickString(image->filename,filename,MagickPathExtent); } else { sans_exception=AcquireExceptionInfo(); magick_info=GetMagickInfo(write_info->magick,sans_exception); if (sans_exception->severity == PolicyError) magick_info=GetMagickInfo(write_info->magick,exception); sans_exception=DestroyExceptionInfo(sans_exception); if ((write_info->affirm == MagickFalse) && (magick_info == (const MagickInfo *) NULL)) { (void) CopyMagickString(write_info->magick,image->magick, MagickPathExtent); magick_info=GetMagickInfo(write_info->magick,exception); } encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler *) NULL) { char extension[MagickPathExtent]; GetPathComponent(image->filename,ExtensionPath,extension); if (*extension != '\0') magick_info=GetMagickInfo(extension,exception); else magick_info=GetMagickInfo(image->magick,exception); (void) CopyMagickString(image->filename,filename, MagickPathExtent); encoder=GetImageEncoder(magick_info); } if (encoder == (EncodeImageHandler *) NULL) { magick_info=GetMagickInfo(image->magick,exception); encoder=GetImageEncoder(magick_info); if (encoder == (EncodeImageHandler 
*) NULL) (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"NoEncodeDelegateForThisImageFormat", "`%s'",write_info->magick); } if (encoder != (EncodeImageHandler *) NULL) { /* Call appropriate image writer based on image type. */ if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) LockSemaphoreInfo(magick_info->semaphore); status=IsCoderAuthorized(write_info->magick,WritePolicyRights, exception); if (status != MagickFalse) status=encoder(write_info,image,exception); if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse) UnlockSemaphoreInfo(magick_info->semaphore); } } } if (temporary != MagickFalse) { /* Copy temporary image file to permanent. */ status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception); if (status != MagickFalse) { (void) RelinquishUniqueFileResource(write_info->filename); status=ImageToFile(image,write_info->filename,exception); } (void) CloseBlob(image); (void) RelinquishUniqueFileResource(image->filename); (void) CopyMagickString(image->filename,write_info->filename, MagickPathExtent); } if ((LocaleCompare(write_info->magick,"info") != 0) && (write_info->verbose != MagickFalse)) (void) IdentifyImage(image,thread_stdout,MagickFalse,exception); write_info=DestroyImageInfo(write_info); if (GetBlobError(image) != MagickFalse) ThrowWriterException(FileOpenError,"UnableToWriteFile"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteImages() writes an image sequence into one or more files. While % WriteImage() can write an image sequence, it is limited to writing % the sequence into a single file using a format which supports multiple % frames. WriteImages(), however, does not have this limitation, instead it % generates multiple output files if necessary (or when requested). 
When % ImageInfo's adjoin flag is set to MagickFalse, the file name is expected % to include a printf-style formatting string for the frame number (e.g. % "image%02d.png"). % % The format of the WriteImages method is: % % MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o images: the image list. % % o filename: the image filename. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info, Image *images,const char *filename,ExceptionInfo *exception) { #define WriteImageTag "Write/Image" ExceptionInfo *sans_exception; ImageInfo *write_info; MagickBooleanType proceed; MagickOffsetType progress; MagickProgressMonitor progress_monitor; MagickSizeType number_images; MagickStatusType status; register Image *p; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); write_info=CloneImageInfo(image_info); *write_info->magick='\0'; images=GetFirstImageInList(images); if (filename != (const char *) NULL) for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) (void) CopyMagickString(p->filename,filename,MagickPathExtent); (void) CopyMagickString(write_info->filename,images->filename, MagickPathExtent); sans_exception=AcquireExceptionInfo(); (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images), sans_exception); sans_exception=DestroyExceptionInfo(sans_exception); if (*write_info->magick == '\0') (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent); p=images; for ( ; GetNextImageInList(p) != (Image *) NULL; 
p=GetNextImageInList(p)) { register Image *next; next=GetNextImageInList(p); if (next == (Image *) NULL) break; if (p->scene >= next->scene) { register ssize_t i; /* Generate consistent scene numbers. */ i=(ssize_t) images->scene; for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) p->scene=(size_t) i++; break; } } /* Write images. */ status=MagickTrue; progress_monitor=(MagickProgressMonitor) NULL; progress=0; number_images=GetImageListLength(images); for (p=images; p != (Image *) NULL; p=GetNextImageInList(p)) { if (number_images != 1) progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL, p->client_data); status&=WriteImage(write_info,p,exception); if (number_images != 1) (void) SetImageProgressMonitor(p,progress_monitor,p->client_data); if (write_info->adjoin != MagickFalse) break; if (number_images != 1) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(p,WriteImageTag,progress,number_images); if (proceed == MagickFalse) break; } } write_info=DestroyImageInfo(write_info); return(status != 0 ? MagickTrue : MagickFalse); }
cp-tree.h
/* Definitions for C++ parsing and type checking. Copyright (C) 1987-2016 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_CP_TREE_H #define GCC_CP_TREE_H #include "tm.h" #include "hard-reg-set.h" #include "function.h" /* In order for the format checking to accept the C++ front end diagnostic framework extensions, you must include this file before diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE in c-common.h. */ #undef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cxxdiag__ #if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H) #error \ In order for the format checking to accept the C++ front end diagnostic \ framework extensions, you must include this file before diagnostic-core.h and \ c-common.h, not after. #endif #include "c-family/c-common.h" #include "diagnostic.h" /* A tree node, together with a location, so that we can track locations (and ranges) during parsing. The location is redundant for node kinds that have locations, but not all node kinds do (e.g. constants, and references to params, locals, etc), so we stash a copy here. 
*/ class cp_expr { public: cp_expr () : m_value (NULL), m_loc (UNKNOWN_LOCATION) {} cp_expr (tree value) : m_value (value), m_loc (EXPR_LOCATION (m_value)) {} cp_expr (tree value, location_t loc): m_value (value), m_loc (loc) {} cp_expr (const cp_expr &other) : m_value (other.m_value), m_loc (other.m_loc) {} /* Implicit conversions to tree. */ operator tree () const { return m_value; } tree & operator* () { return m_value; } tree & operator-> () { return m_value; } tree get_value () const { return m_value; } location_t get_location () const { return m_loc; } location_t get_start () const { source_range src_range = get_range_from_loc (line_table, m_loc); return src_range.m_start; } location_t get_finish () const { source_range src_range = get_range_from_loc (line_table, m_loc); return src_range.m_finish; } void set_location (location_t loc) { protected_set_expr_location (m_value, loc); m_loc = loc; } void set_range (location_t start, location_t finish) { set_location (make_location (m_loc, start, finish)); } private: tree m_value; location_t m_loc; }; inline bool operator == (const cp_expr &lhs, tree rhs) { return lhs.get_value () == rhs; } #include "name-lookup.h" /* Usage of TREE_LANG_FLAG_?: 0: IDENTIFIER_MARKED (IDENTIFIER_NODEs) NEW_EXPR_USE_GLOBAL (in NEW_EXPR). COND_EXPR_IS_VEC_DELETE (in COND_EXPR). DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR). COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR). CLEANUP_P (in TRY_BLOCK) AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR) PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF) PAREN_STRING_LITERAL (in STRING_CST) CP_DECL_THREAD_LOCAL_P (in VAR_DECL) KOENIG_LOOKUP_P (in CALL_EXPR) STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST). 
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT) STMT_EXPR_NO_SCOPE (in STMT_EXPR) BIND_EXPR_TRY_BLOCK (in BIND_EXPR) TYPENAME_IS_ENUM_P (in TYPENAME_TYPE) OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE, and OMP_TASKLOOP) BASELINK_QUALIFIED_P (in BASELINK) TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR) TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX) TREE_INDIRECT_USING (in a TREE_LIST of using-directives) ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute) ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag) CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR) LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR) DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE) VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR) DECL_OVERRIDE_P (in FUNCTION_DECL) IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR) TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR) CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR) OVL_ARG_DEPENDENT (in OVERLOAD) PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION) TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO) SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR) COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ) WILDCARD_PACK_P (in WILDCARD_DECL) BLOCK_OUTER_CURLY_BRACE_P (in BLOCK) FOLD_EXPR_MODOP_P (*_FOLD_EXPR) 1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE) TI_PENDING_TEMPLATE_FLAG. TEMPLATE_PARMS_FOR_INLINE. DELETE_EXPR_USE_VEC (in DELETE_EXPR). (TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out). 
ICS_ELLIPSIS_FLAG (in _CONV) DECL_INITIALIZED_P (in VAR_DECL) TYPENAME_IS_CLASS_P (in TYPENAME_TYPE) STMT_IS_FULL_EXPR_P (in _STMT) TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR) LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR) DECL_FINAL_P (in FUNCTION_DECL) QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF) DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE) CONSTRUCTOR_NO_IMPLICIT_ZERO (in CONSTRUCTOR) TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO) PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION) 2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE) ICS_THIS_FLAG (in _CONV) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL) STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST) TYPENAME_IS_RESOLVING_P (in TYPE_NAME_TYPE) TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR) FNDECL_USED_AUTO (in FUNCTION_DECL) DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE) REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF) AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR) CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR) 3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out). ICS_BAD_FLAG (in _CONV) FN_TRY_BLOCK_P (in TRY_BLOCK) IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE) BIND_EXPR_BODY_BLOCK (in BIND_EXPR) DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL) CALL_EXPR_LIST_INIT_P (in CALL_EXPR, AGGR_INIT_EXPR) 4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR, or FIELD_DECL). IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE) DECL_TINFO_P (in VAR_DECL) FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) 5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE) DECL_VTABLE_OR_VTT_P (in VAR_DECL) FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE) 6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE) DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL) TYPE_MARKED_P (in _TYPE) RANGE_FOR_IVDEP (in RANGE_FOR_STMT) Usage of TYPE_LANG_FLAG_?: 0: TYPE_DEPENDENT_P 1: TYPE_HAS_USER_CONSTRUCTOR. 2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE) TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE) 3: TYPE_FOR_JAVA. 
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR 5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE) ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE) AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM) REFERENCE_VLA_OK (in REFERENCE_TYPE) 6: TYPE_DEPENDENT_P_VALID Usage of DECL_LANG_FLAG_?: 0: DECL_ERROR_REPORTED (in VAR_DECL). DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL) DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL) DECL_MUTABLE_P (in FIELD_DECL) DECL_DEPENDENT_P (in USING_DECL) LABEL_DECL_BREAK (in LABEL_DECL) 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) USING_DECL_TYPENAME_P (in USING_DECL) DECL_VLA_CAPTURE_P (in FIELD_DECL) DECL_ARRAY_PARAMETER_P (in PARM_DECL) LABEL_DECL_CONTINUE (in LABEL_DECL) 2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL). DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL) DECL_CONSTRAINT_VAR_P (in a PARM_DECL) TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL) DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL) 3: DECL_IN_AGGR_P. 4: DECL_C_BIT_FIELD (in a FIELD_DECL) DECL_ANON_UNION_VAR_P (in a VAR_DECL) DECL_SELF_REFERENCE_P (in a TYPE_DECL) DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL) 5: DECL_INTERFACE_KNOWN. 6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL). DECL_FIELD_IS_BASE (in FIELD_DECL) TYPE_DECL_ALIAS_P (in TYPE_DECL) 7: DECL_DEAD_FOR_LOCAL (in VAR_DECL). DECL_THUNK_P (in a member FUNCTION_DECL) DECL_NORMAL_CAPTURE_P (in FIELD_DECL) 8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL) Usage of language-independent fields in a language-dependent manner: TYPE_ALIAS_SET This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so forth as a substitute for the mark bits provided in `lang_type'. At present, only the six low-order bits are used. TYPE_LANG_SLOT_1 For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO. For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS BINFO_VIRTUALS For a binfo, this is a TREE_LIST. 
There is an entry for each virtual function declared either in BINFO or its direct and indirect primary bases. The BV_DELTA of each node gives the amount by which to adjust the `this' pointer when calling the function. If the method is an overridden version of a base class method, then it is assumed that, prior to adjustment, the this pointer points to an object of the base class. The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable index of the vcall offset for this entry. The BV_FN is the declaration for the virtual function itself. If BV_LOST_PRIMARY is set, it means that this entry is for a lost primary virtual base and can be left null in the vtable. BINFO_VTABLE This is an expression with POINTER_TYPE that gives the value to which the vptr should be initialized. Use get_vtbl_decl_for_binfo to extract the VAR_DECL for the complete vtable. DECL_VINDEX This field is NULL for a non-virtual function. For a virtual function, it is eventually set to an INTEGER_CST indicating the index in the vtable at which this function can be found. When a virtual function is declared, but before it is known what function is overridden, this field is the error_mark_node. Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is the virtual function this one overrides, and whose TREE_CHAIN is the old DECL_VINDEX. */ /* Language-specific tree checkers. 
*/ #define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL) #define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL) #define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \ || TREE_CODE (NODE) == FUNCTION_DECL) #define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL) #define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \ TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define THUNK_FUNCTION_CHECK(NODE) __extension__ \ ({ __typeof (NODE) const __t = (NODE); \ if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \ || !__t->decl_common.lang_specific->u.fn.thunk_p) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \ __t; }) #else #define THUNK_FUNCTION_CHECK(NODE) (NODE) #endif /* Language-dependent contents of an identifier. */ struct GTY(()) lang_identifier { struct c_common_identifier c_common; cxx_binding *namespace_bindings; cxx_binding *bindings; tree class_template_info; tree label_value; }; /* Return a typed pointer version of T if it designates a C++ front-end identifier. */ inline lang_identifier* identifier_p (tree t) { if (TREE_CODE (t) == IDENTIFIER_NODE) return (lang_identifier*) t; return NULL; } /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. 
*/ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID) #define LANG_IDENTIFIER_CAST(NODE) \ ((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE)) struct GTY(()) template_parm_index { struct tree_common common; int index; int level; int orig_level; tree decl; }; struct GTY(()) ptrmem_cst { struct tree_common common; tree member; }; typedef struct ptrmem_cst * ptrmem_cst_t; #define IDENTIFIER_GLOBAL_VALUE(NODE) \ namespace_binding ((NODE), global_namespace) #define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \ set_namespace_binding ((NODE), global_namespace, (VAL)) #define IDENTIFIER_NAMESPACE_VALUE(NODE) \ namespace_binding ((NODE), current_namespace) #define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \ set_namespace_binding ((NODE), current_namespace, (VAL)) #define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE)) #define BIND_EXPR_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE)) /* Used to mark the block around the member initializers and cleanups. */ #define BIND_EXPR_BODY_BLOCK(NODE) \ TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE)) #define FUNCTION_NEEDS_BODY_BLOCK(NODE) \ (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \ || LAMBDA_FUNCTION_P (NODE)) #define STATEMENT_LIST_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE)) #define STATEMENT_LIST_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE)) /* Mark the outer curly brace BLOCK. */ #define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE)) /* Nonzero if this statement should be considered a full-expression, i.e., if temporaries created during this statement should have their destructors run at the end of this statement. */ #define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE)) /* Marks the result of a statement expression. */ #define EXPR_STMT_STMT_EXPR_RESULT(NODE) \ TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE)) /* Nonzero if this statement-expression does not have an associated scope. 
*/ #define STMT_EXPR_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE)) #define COND_EXPR_IS_VEC_DELETE(NODE) \ TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE)) /* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual sense of `same'. */ #define same_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_STRICT) /* Returns nonzero iff NODE is a declaration for the global function `main'. */ #define DECL_MAIN_P(NODE) \ (DECL_EXTERN_C_FUNCTION_P (NODE) \ && DECL_NAME (NODE) != NULL_TREE \ && MAIN_NAME_P (DECL_NAME (NODE)) \ && flag_hosted) /* The overloaded FUNCTION_DECL. */ #define OVL_FUNCTION(NODE) \ (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function) #define OVL_CHAIN(NODE) TREE_CHAIN (NODE) /* Polymorphic access to FUNCTION and CHAIN. */ #define OVL_CURRENT(NODE) \ ((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE)) #define OVL_NEXT(NODE) \ ((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE) /* If set, this was imported in a using declaration. This is not to confuse with being used somewhere, which is not important for this node. */ #define OVL_USED(NODE) TREE_USED (OVERLOAD_CHECK (NODE)) /* If set, this OVERLOAD was created for argument-dependent lookup and can be freed afterward. */ #define OVL_ARG_DEPENDENT(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE)) struct GTY(()) tree_overload { struct tree_common common; tree function; }; struct GTY(()) tree_template_decl { struct tree_decl_common common; tree arguments; tree result; }; /* Returns true iff NODE is a BASELINK. */ #define BASELINK_P(NODE) \ (TREE_CODE (NODE) == BASELINK) /* The BINFO indicating the base in which lookup found the BASELINK_FUNCTIONS. */ #define BASELINK_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo) /* The functions referred to by the BASELINK; either a FUNCTION_DECL, a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. 
*/ #define BASELINK_FUNCTIONS(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions) /* The BINFO in which the search for the functions indicated by this baselink began. This base is used to determine the accessibility of functions selected by overload resolution. */ #define BASELINK_ACCESS_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo) /* For a type-conversion operator, the BASELINK_OPTYPE indicates the type to which the conversion should occur. This value is important if the BASELINK_FUNCTIONS include a template conversion operator -- the BASELINK_OPTYPE can be used to determine what type the user requested. */ #define BASELINK_OPTYPE(NODE) \ (TREE_CHAIN (BASELINK_CHECK (NODE))) /* Nonzero if this baselink was from a qualified lookup. */ #define BASELINK_QUALIFIED_P(NODE) \ TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE)) struct GTY(()) tree_baselink { struct tree_common common; tree binfo; tree functions; tree access_binfo; }; /* The different kinds of ids that we encounter. */ enum cp_id_kind { /* Not an id at all. */ CP_ID_KIND_NONE, /* An unqualified-id that is not a template-id. */ CP_ID_KIND_UNQUALIFIED, /* An unqualified-id that is a dependent name. */ CP_ID_KIND_UNQUALIFIED_DEPENDENT, /* An unqualified template-id. */ CP_ID_KIND_TEMPLATE_ID, /* A qualified-id. */ CP_ID_KIND_QUALIFIED }; /* The various kinds of C++0x warnings we encounter. 
*/ enum cpp0x_warn_str { /* extended initializer lists */ CPP0X_INITIALIZER_LISTS, /* explicit conversion operators */ CPP0X_EXPLICIT_CONVERSION, /* variadic templates */ CPP0X_VARIADIC_TEMPLATES, /* lambda expressions */ CPP0X_LAMBDA_EXPR, /* C++0x auto */ CPP0X_AUTO, /* scoped enums */ CPP0X_SCOPED_ENUMS, /* defaulted and deleted functions */ CPP0X_DEFAULTED_DELETED, /* inline namespaces */ CPP0X_INLINE_NAMESPACES, /* override controls, override/final */ CPP0X_OVERRIDE_CONTROLS, /* non-static data member initializers */ CPP0X_NSDMI, /* user defined literals */ CPP0X_USER_DEFINED_LITERALS, /* delegating constructors */ CPP0X_DELEGATING_CTORS, /* inheriting constructors */ CPP0X_INHERITING_CTORS, /* C++11 attributes */ CPP0X_ATTRIBUTES, /* ref-qualified member functions */ CPP0X_REF_QUALIFIER }; /* The various kinds of operation used by composite_pointer_type. */ enum composite_pointer_operation { /* comparison */ CPO_COMPARISON, /* conversion */ CPO_CONVERSION, /* conditional expression */ CPO_CONDITIONAL_EXPR }; /* Possible cases of expression list used by build_x_compound_expr_from_list. */ enum expr_list_kind { ELK_INIT, /* initializer */ ELK_MEM_INIT, /* member initializer */ ELK_FUNC_CAST /* functional cast */ }; /* Possible cases of implicit bad rhs conversions. */ enum impl_conv_rhs { ICR_DEFAULT_ARGUMENT, /* default argument */ ICR_CONVERTING, /* converting */ ICR_INIT, /* initialization */ ICR_ARGPASS, /* argument passing */ ICR_RETURN, /* return */ ICR_ASSIGN /* assignment */ }; /* Possible cases of implicit or explicit bad conversions to void. 
*/ enum impl_conv_void { ICV_CAST, /* (explicit) conversion to void */ ICV_SECOND_OF_COND, /* second operand of conditional expression */ ICV_THIRD_OF_COND, /* third operand of conditional expression */ ICV_RIGHT_OF_COMMA, /* right operand of comma operator */ ICV_LEFT_OF_COMMA, /* left operand of comma operator */ ICV_STATEMENT, /* statement */ ICV_THIRD_IN_FOR /* for increment expression */ }; /* Possible invalid uses of an abstract class that might not have a specific associated declaration. */ enum GTY(()) abstract_class_use { ACU_UNKNOWN, /* unknown or decl provided */ ACU_CAST, /* cast to abstract class */ ACU_NEW, /* new-expression of abstract class */ ACU_THROW, /* throw-expression of abstract class */ ACU_CATCH, /* catch-parameter of abstract class */ ACU_ARRAY, /* array of abstract class */ ACU_RETURN, /* return type of abstract class */ ACU_PARM /* parameter type of abstract class */ }; /* Macros for access to language-specific slots in an identifier. */ #define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->namespace_bindings) #define IDENTIFIER_TEMPLATE(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->class_template_info) /* The IDENTIFIER_BINDING is the innermost cxx_binding for the identifier. It's PREVIOUS is the next outermost binding. Each VALUE field is a DECL for the associated declaration. Thus, name lookup consists simply of pulling off the node at the front of the list (modulo oddities for looking up the names of types, and such.) You can use SCOPE field to determine the scope that bound the name. */ #define IDENTIFIER_BINDING(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->bindings) /* TREE_TYPE only indicates on local and class scope the current type. For namespace scope, the presence of a type in any namespace is indicated with global_type_node, and the real type behind must be found through lookup. 
*/ #define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE) #define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE) #define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE)) #define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0) #define IDENTIFIER_LABEL_VALUE(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->label_value) #define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \ IDENTIFIER_LABEL_VALUE (NODE) = (VALUE) /* Nonzero if this identifier is used as a virtual function name somewhere (optimizes searches). */ #define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE) /* Nonzero if this identifier is the prefix for a mangled C++ operator name. */ #define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE) /* Nonzero if this identifier is the name of a type-conversion operator. */ #define IDENTIFIER_TYPENAME_P(NODE) \ TREE_LANG_FLAG_4 (NODE) /* Nonzero if this identifier is the name of a constructor or destructor. */ #define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \ TREE_LANG_FLAG_3 (NODE) /* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague linkage which the prelinker has assigned to this translation unit. */ #define IDENTIFIER_REPO_CHOSEN(NAME) \ (TREE_LANG_FLAG_6 (NAME)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly) /* The tokens stored in the default argument. 
*/ #define DEFARG_TOKENS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens) #define DEFARG_INSTANTIATIONS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations) struct GTY (()) tree_default_arg { struct tree_common common; struct cp_token_cache *tokens; vec<tree, va_gc> *instantiations; }; #define DEFERRED_NOEXCEPT_PATTERN(NODE) \ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern) #define DEFERRED_NOEXCEPT_ARGS(NODE) \ (((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args) #define DEFERRED_NOEXCEPT_SPEC_P(NODE) \ ((NODE) && (TREE_PURPOSE (NODE)) \ && (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT)) #define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \ (DEFERRED_NOEXCEPT_SPEC_P (NODE) \ && DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE) struct GTY (()) tree_deferred_noexcept { struct tree_base base; tree pattern; tree args; }; /* The condition associated with the static assertion. This must be an integral constant expression. */ #define STATIC_ASSERT_CONDITION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition) /* The message associated with the static assertion. This must be a string constant, which will be emitted as an error message when the static assert condition is false. */ #define STATIC_ASSERT_MESSAGE(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message) /* Source location information for a static assertion. */ #define STATIC_ASSERT_SOURCE_LOCATION(NODE) \ (((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location) struct GTY (()) tree_static_assert { struct tree_common common; tree condition; tree message; location_t location; }; struct GTY (()) tree_argument_pack_select { struct tree_common common; tree argument_pack; int index; }; /* The different kinds of traits that we encounter. 
*/ enum cp_trait_kind { CPTK_BASES, CPTK_DIRECT_BASES, CPTK_HAS_NOTHROW_ASSIGN, CPTK_HAS_NOTHROW_CONSTRUCTOR, CPTK_HAS_NOTHROW_COPY, CPTK_HAS_TRIVIAL_ASSIGN, CPTK_HAS_TRIVIAL_CONSTRUCTOR, CPTK_HAS_TRIVIAL_COPY, CPTK_HAS_TRIVIAL_DESTRUCTOR, CPTK_HAS_VIRTUAL_DESTRUCTOR, CPTK_IS_ABSTRACT, CPTK_IS_BASE_OF, CPTK_IS_CLASS, CPTK_IS_EMPTY, CPTK_IS_ENUM, CPTK_IS_FINAL, CPTK_IS_LITERAL_TYPE, CPTK_IS_POD, CPTK_IS_POLYMORPHIC, CPTK_IS_SAME_AS, CPTK_IS_STD_LAYOUT, CPTK_IS_TRIVIAL, CPTK_IS_TRIVIALLY_ASSIGNABLE, CPTK_IS_TRIVIALLY_CONSTRUCTIBLE, CPTK_IS_TRIVIALLY_COPYABLE, CPTK_IS_UNION, CPTK_UNDERLYING_TYPE }; /* The types that we are processing. */ #define TRAIT_EXPR_TYPE1(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1) #define TRAIT_EXPR_TYPE2(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2) /* The specific trait that we are processing. */ #define TRAIT_EXPR_KIND(NODE) \ (((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind) struct GTY (()) tree_trait_expr { struct tree_common common; tree type1; tree type2; enum cp_trait_kind kind; }; /* Based off of TYPE_ANONYMOUS_P. */ #define LAMBDA_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE)) /* Test if FUNCTION_DECL is a lambda function. */ #define LAMBDA_FUNCTION_P(FNDECL) \ (DECL_OVERLOADED_OPERATOR_P (FNDECL) == CALL_EXPR \ && LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL))) enum cp_lambda_default_capture_mode_type { CPLD_NONE, CPLD_COPY, CPLD_REFERENCE }; /* The method of default capture, if any. */ #define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode) /* The capture-list, including `this'. Each capture is stored as a FIELD_DECL * so that the name, type, and field are all together, whether or not it has * been added to the lambda's class type. TREE_LIST: TREE_PURPOSE: The FIELD_DECL for this capture. TREE_VALUE: The initializer. This is part of a GNU extension. 
*/ #define LAMBDA_EXPR_CAPTURE_LIST(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list) /* During parsing of the lambda-introducer, the node in the capture-list that holds the 'this' capture. During parsing of the body, the capture proxy for that node. */ #define LAMBDA_EXPR_THIS_CAPTURE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture) /* Predicate tracking whether `this' is in the effective capture set. */ #define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \ LAMBDA_EXPR_THIS_CAPTURE(NODE) /* Predicate tracking whether the lambda was declared 'mutable'. */ #define LAMBDA_EXPR_MUTABLE_P(NODE) \ TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE)) /* The return type in the expression. * NULL_TREE indicates that none was specified. */ #define LAMBDA_EXPR_RETURN_TYPE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->return_type) /* The source location of the lambda. */ #define LAMBDA_EXPR_LOCATION(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus) /* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL, FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */ #define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope) /* If EXTRA_SCOPE, this is the number of the lambda within that scope. */ #define LAMBDA_EXPR_DISCRIMINATOR(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator) /* During parsing of the lambda, a vector of capture proxies which need to be pushed once we're done processing a nested lambda. */ #define LAMBDA_EXPR_PENDING_PROXIES(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies) /* The closure type of the lambda. Note that the TREE_TYPE of a LAMBDA_EXPR is always NULL_TREE, because we need to instantiate the LAMBDA_EXPR in order to instantiate the type. 
*/ #define LAMBDA_EXPR_CLOSURE(NODE) \ (((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->closure) struct GTY (()) tree_lambda_expr { struct tree_typed typed; tree capture_list; tree this_capture; tree return_type; tree extra_scope; tree closure; vec<tree, va_gc> *pending_proxies; location_t locus; enum cp_lambda_default_capture_mode_type default_capture_mode; int discriminator; }; /* A (typedef,context,usage location) triplet. It represents a typedef used through a context at a given source location. e.g. struct foo { typedef int myint; }; struct bar { foo::myint v; // #1<-- this location. }; In bar, the triplet will be (myint, foo, #1). */ struct GTY(()) qualified_typedef_usage_s { tree typedef_decl; tree context; location_t locus; }; typedef struct qualified_typedef_usage_s qualified_typedef_usage_t; /* Non-zero if this template specialization has access violations that should be rechecked when the function is instantiated outside argument deduction. */ #define TINFO_HAS_ACCESS_ERRORS(NODE) \ (TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE))) #define FNDECL_HAS_ACCESS_ERRORS(NODE) \ (TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE))) /* Non-zero if this variable template specialization was specified using a template-id, so it's a partial or full specialization and not a definition of the member template of a particular class specialization. */ #define TINFO_USED_TEMPLATE_ID(NODE) \ (TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE))) struct GTY(()) tree_template_info { struct tree_common common; vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking; }; // Constraint information for a C++ declaration. 
Constraint information is // comprised of: // // - a constraint expression introduced by the template header // - a constraint expression introduced by a function declarator // - the associated constraints, which are the conjunction of those, // and used for declaration matching // // The template and declarator requirements are kept to support pretty // printing constrained declarations. struct GTY(()) tree_constraint_info { struct tree_base base; tree template_reqs; tree declarator_reqs; tree associated_constr; }; // Require that pointer P is non-null before returning. template<typename T> inline T* check_nonnull (T* p) { gcc_assert (p); return p; } // Returns true iff T is non-null and represents constraint info. inline tree_constraint_info * check_constraint_info (tree t) { if (t && TREE_CODE (t) == CONSTRAINT_INFO) return (tree_constraint_info *)t; return NULL; } // Access the expression describing the template constraints. This may be // null if no constraints were introduced in the template parameter list, // a requirements clause after the template parameter list, or constraints // through a constrained-type-specifier. #define CI_TEMPLATE_REQS(NODE) \ check_constraint_info (check_nonnull(NODE))->template_reqs // Access the expression describing the trailing constraints. This is non-null // for any implicit instantiation of a constrained declaration. For a // templated declaration it is non-null only when a trailing requires-clause // was specified. #define CI_DECLARATOR_REQS(NODE) \ check_constraint_info (check_nonnull(NODE))->declarator_reqs // The computed associated constraint expression for a declaration. #define CI_ASSOCIATED_CONSTRAINTS(NODE) \ check_constraint_info (check_nonnull(NODE))->associated_constr // Access the logical constraints on the template parameters introduced // at a given template parameter list level indicated by NODE. 
#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \ TREE_TYPE (TREE_LIST_CHECK (NODE)) // Access the logical constraints on the template parameter declaration // indicated by NODE. #define TEMPLATE_PARM_CONSTRAINTS(NODE) \ TREE_TYPE (TREE_LIST_CHECK (NODE)) /* Non-zero if the noexcept is present in a compound requirement. */ #define COMPOUND_REQ_NOEXCEPT_P(NODE) \ TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ)) /* The constraints on an 'auto' placeholder type, used in an argument deduction constraint. */ #define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \ DECL_SIZE_UNIT (TYPE_NAME (NODE)) /* The expression evaluated by the predicate constraint. */ #define PRED_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, PRED_CONSTR), 0) /* The concept of a concept check. */ #define CHECK_CONSTR_CONCEPT(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0) /* The template arguments of a concept check. */ #define CHECK_CONSTR_ARGS(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1) /* The expression validated by the predicate constraint. */ #define EXPR_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, EXPR_CONSTR), 0) /* The type validated by the predicate constraint. */ #define TYPE_CONSTR_TYPE(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, TYPE_CONSTR), 0) /* In an implicit conversion constraint, the source expression. */ #define ICONV_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 0) /* In an implicit conversion constraint, the target type. */ #define ICONV_CONSTR_TYPE(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 1) /* In an argument deduction constraint, the source expression. */ #define DEDUCT_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 0) /* In an argument deduction constraint, the target type pattern. */ #define DEDUCT_CONSTR_PATTERN(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 1) /* In an argument deduction constraint, the list of placeholder nodes. 
*/ #define DEDUCT_CONSTR_PLACEHOLDER(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 2) /* The expression of an exception constraint. */ #define EXCEPT_CONSTR_EXPR(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, EXCEPT_CONSTR), 0) /* In a parameterized constraint, the local parameters. */ #define PARM_CONSTR_PARMS(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 0) /* In a parameterized constraint, the operand. */ #define PARM_CONSTR_OPERAND(NODE) \ TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 1) /* Whether a PARM_DECL represents a local parameter in a requires-expression. */ #define CONSTRAINT_VAR_P(NODE) \ DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL)) /* The concept constraining this constrained template-parameter. */ #define CONSTRAINED_PARM_CONCEPT(NODE) \ DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE)) /* Any extra template arguments specified for a constrained template-parameter. */ #define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \ DECL_SIZE (TYPE_DECL_CHECK (NODE)) /* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a prototype for the constrained parameter in finish_shorthand_constraint, attached for convenience. */ #define CONSTRAINED_PARM_PROTOTYPE(NODE) \ DECL_INITIAL (TYPE_DECL_CHECK (NODE)) enum cp_tree_node_structure_enum { TS_CP_GENERIC, TS_CP_IDENTIFIER, TS_CP_TPI, TS_CP_PTRMEM, TS_CP_BINDING, TS_CP_OVERLOAD, TS_CP_BASELINK, TS_CP_TEMPLATE_DECL, TS_CP_WRAPPER, TS_CP_DEFAULT_ARG, TS_CP_DEFERRED_NOEXCEPT, TS_CP_STATIC_ASSERT, TS_CP_ARGUMENT_PACK_SELECT, TS_CP_TRAIT_EXPR, TS_CP_LAMBDA_EXPR, TS_CP_TEMPLATE_INFO, TS_CP_CONSTRAINT_INFO, TS_CP_USERDEF_LITERAL, LAST_TS_CP_ENUM }; /* The resulting tree type. 
*/ union GTY((desc ("cp_tree_node_structure (&%h)"), chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { union tree_node GTY ((tag ("TS_CP_GENERIC"), desc ("tree_node_structure (&%h)"))) generic; struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi; struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem; struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload; struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink; struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl; struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg; struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept; struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier; struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT"))) static_assertion; struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT"))) argument_pack_select; struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR"))) trait_expression; struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR"))) lambda_expression; struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO"))) template_info; struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO"))) constraint_info; struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL"))) userdef_literal; }; enum cp_tree_index { CPTI_JAVA_BYTE_TYPE, CPTI_JAVA_SHORT_TYPE, CPTI_JAVA_INT_TYPE, CPTI_JAVA_LONG_TYPE, CPTI_JAVA_FLOAT_TYPE, CPTI_JAVA_DOUBLE_TYPE, CPTI_JAVA_CHAR_TYPE, CPTI_JAVA_BOOLEAN_TYPE, CPTI_WCHAR_DECL, CPTI_VTABLE_ENTRY_TYPE, CPTI_DELTA_TYPE, CPTI_VTABLE_INDEX_TYPE, CPTI_CLEANUP_TYPE, CPTI_VTT_PARM_TYPE, CPTI_CLASS_TYPE, CPTI_UNKNOWN_TYPE, CPTI_INIT_LIST_TYPE, CPTI_VTBL_TYPE, CPTI_VTBL_PTR_TYPE, CPTI_STD, CPTI_ABI, CPTI_CONST_TYPE_INFO_TYPE, CPTI_TYPE_INFO_PTR_TYPE, CPTI_ABORT_FNDECL, CPTI_AGGR_TAG, CPTI_CTOR_IDENTIFIER, CPTI_COMPLETE_CTOR_IDENTIFIER, CPTI_BASE_CTOR_IDENTIFIER, CPTI_DTOR_IDENTIFIER, CPTI_COMPLETE_DTOR_IDENTIFIER, 
CPTI_BASE_DTOR_IDENTIFIER, CPTI_DELETING_DTOR_IDENTIFIER, CPTI_DELTA_IDENTIFIER, CPTI_IN_CHARGE_IDENTIFIER, CPTI_VTT_PARM_IDENTIFIER, CPTI_NELTS_IDENTIFIER, CPTI_THIS_IDENTIFIER, CPTI_PFN_IDENTIFIER, CPTI_VPTR_IDENTIFIER, CPTI_STD_IDENTIFIER, CPTI_LANG_NAME_C, CPTI_LANG_NAME_CPLUSPLUS, CPTI_LANG_NAME_JAVA, CPTI_EMPTY_EXCEPT_SPEC, CPTI_NOEXCEPT_TRUE_SPEC, CPTI_NOEXCEPT_FALSE_SPEC, CPTI_JCLASS, CPTI_TERMINATE, CPTI_CALL_UNEXPECTED, CPTI_ATEXIT_FN_PTR_TYPE, CPTI_ATEXIT, CPTI_DSO_HANDLE, CPTI_DCAST, CPTI_KEYED_CLASSES, CPTI_NULLPTR, CPTI_NULLPTR_TYPE, CPTI_MAX }; extern GTY(()) tree cp_global_trees[CPTI_MAX]; #define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE] #define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE] #define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE] #define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE] #define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE] #define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE] #define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE] #define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE] #define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL] #define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE] /* The type used to represent an offset by which to adjust the `this' pointer in pointer-to-member types. */ #define delta_type_node cp_global_trees[CPTI_DELTA_TYPE] /* The type used to represent an index into the vtable. 
*/ #define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE] #define class_type_node cp_global_trees[CPTI_CLASS_TYPE] #define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE] #define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE] #define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE] #define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE] #define std_node cp_global_trees[CPTI_STD] #define abi_node cp_global_trees[CPTI_ABI] #define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE] #define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE] #define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL] #define current_aggr cp_global_trees[CPTI_AGGR_TAG] #define nullptr_node cp_global_trees[CPTI_NULLPTR] #define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE] /* We cache these tree nodes so as to call get_identifier less frequently. */ /* The name of a constructor that takes an in-charge parameter to decide whether or not to construct virtual base classes. */ #define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER] /* The name of a constructor that constructs virtual base classes. */ #define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER] /* The name of a constructor that does not construct virtual base classes. */ #define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER] /* The name of a destructor that takes an in-charge parameter to decide whether or not to destroy virtual base classes and whether or not to delete the object. */ #define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes. */ #define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER] /* The name of a destructor that does not destroy virtual base classes. 
*/ #define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes, and then deletes the entire object. */ #define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER] #define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER] #define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER] /* The name of the parameter that contains a pointer to the VTT to use for this subobject constructor or destructor. */ #define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER] #define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER] #define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER] #define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER] #define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER] /* The name of the std namespace. */ #define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER] #define lang_name_c cp_global_trees[CPTI_LANG_NAME_C] #define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS] #define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA] /* Exception specifier used for throw(). */ #define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC] #define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC] #define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC] /* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*). */ #define jclass_node cp_global_trees[CPTI_JCLASS] /* The declaration for `std::terminate'. */ #define terminate_node cp_global_trees[CPTI_TERMINATE] /* The declaration for "__cxa_call_unexpected". */ #define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED] /* The type of the function-pointer argument to "__cxa_atexit" (or "std::atexit", if "__cxa_atexit" is not being used). */ #define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE] /* A pointer to `std::atexit'. 
*/
#define atexit_node cp_global_trees[CPTI_ATEXIT]

/* A pointer to `__dso_handle'.  */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]

/* The declaration of the dynamic_cast runtime.  */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]

/* The type of a destructor.  */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]

/* The type of the vtt parameter passed to subobject constructors and
   destructors.  */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]

/* A TREE_LIST of the dynamic classes whose vtables may have to be emitted
   in this translation unit.  */
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]

/* Node to indicate default access.  This must be distinct from the
   access nodes in tree.h.  */
#define access_default_node null_node

/* Global state.  Per-scope parser/semantic state; instances are chained
   through `prev' (see scope_chain below), and the current state is read
   through the accessor macros that follow.  */
struct GTY(()) saved_scope {
  vec<cxx_saved_binding, va_gc> *old_bindings;
  tree old_namespace;
  vec<tree, va_gc> *decl_ns_list;
  tree class_name;
  tree class_type;
  tree access_specifier;
  tree function_decl;
  vec<tree, va_gc> *lang_base;
  tree lang_name;
  tree template_parms;
  cp_binding_level *x_previous_class_level;
  tree x_saved_tree;

  /* Only used for uses of this in trailing return type.  */
  tree x_current_class_ptr;
  tree x_current_class_ref;

  int x_processing_template_decl;
  int x_processing_specialization;
  BOOL_BITFIELD x_processing_explicit_instantiation : 1;
  BOOL_BITFIELD need_pop_function_context : 1;

  int unevaluated_operand;
  int inhibit_evaluation_warnings;
  int noexcept_operand;

  /* If non-zero, implicit "omp declare target" attribute is added into the
     attribute lists.  */
  int omp_declare_target_attribute;

  struct stmt_tree_s x_stmt_tree;

  cp_binding_level *class_bindings;
  cp_binding_level *bindings;

  /* GTY((skip)): not walked by the garbage collector.  */
  hash_map<tree, tree> *GTY((skip)) x_local_specializations;

  /* Link to the enclosing saved scope, forming a stack.  */
  struct saved_scope *prev;
};

/* Head of the saved-scope stack; the current scope state.  */
extern GTY(()) struct saved_scope *scope_chain;

/* The current open namespace.  */
#define current_namespace scope_chain->old_namespace

/* The stack for namespaces of current declarations.
*/
#define decl_namespace_list scope_chain->decl_ns_list

/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name

/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type

/* When parsing a class definition, the access specifier most recently
   given by the user, or, if no access specifier was given, the default
   value appropriate for the kind of class (i.e., struct, class, or
   union).  */
#define current_access_specifier scope_chain->access_specifier

/* Pointer to the top of the language name stack.  */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name

/* When parsing a template declaration, a TREE_LIST represents the
   active template parameters.  Each node in the list represents one
   level of template parameters.  The innermost level is first in the
   list.  The depth of each level is stored as an INTEGER_CST in the
   TREE_PURPOSE of each node.  The parameters for that level are
   stored in the TREE_VALUE.  */
#define current_template_parms scope_chain->template_parms

#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation

/* RAII sentinel to handle clearing processing_template_decl and restoring
   it when done.  By default the constructor also zeroes
   processing_template_decl; pass reset = false to merely save and
   restore it.  */
struct processing_template_decl_sentinel
{
  /* Value of processing_template_decl on entry; restored by the dtor.  */
  int saved;
  processing_template_decl_sentinel (bool reset = true)
    : saved (processing_template_decl)
  {
    if (reset)
      processing_template_decl = 0;
  }
  ~processing_template_decl_sentinel()
  {
    processing_template_decl = saved;
  }
};

/* RAII sentinel to disable certain warnings during template substitution
   and elsewhere.
*/
struct warning_sentinel
{
  /* Reference to the warning flag being controlled.  */
  int &flag;
  /* Saved value of the flag, restored on destruction.  */
  int val;
  warning_sentinel(int& flag, bool suppress=true)
    : flag(flag), val(flag)
  {
    if (suppress)
      flag = 0;
  }
  ~warning_sentinel() { flag = val; }
};

/* The cached class binding level, from the most recently exited class, or
   NULL if none.  */
#define previous_class_level scope_chain->x_previous_class_level

/* A map from local variable declarations in the body of the template
   presently being instantiated to the corresponding instantiated local
   variables.  */
#define local_specializations scope_chain->x_local_specializations

/* Nonzero if we are parsing the operand of a noexcept operator.  */
#define cp_noexcept_operand scope_chain->noexcept_operand

/* A list of private types mentioned, for deferred access checking.  */
struct GTY((for_user)) cxx_int_tree_map {
  unsigned int uid;
  tree to;
};

/* Hash traits for cxx_int_tree_map; implementations live elsewhere.  */
struct cxx_int_tree_map_hasher : ggc_ptr_hash<cxx_int_tree_map>
{
  static hashval_t hash (cxx_int_tree_map *);
  static bool equal (cxx_int_tree_map *, cxx_int_tree_map *);
};

struct named_label_entry;

/* Hash traits for named_label_entry; implementations live elsewhere.  */
struct named_label_hasher : ggc_ptr_hash<named_label_entry>
{
  static hashval_t hash (named_label_entry *);
  static bool equal (named_label_entry *, named_label_entry *);
};

/* Global state pertinent to the current function.  Accessed through
   cp_function_chain and the macros defined below.  */
struct GTY(()) language_function {
  struct c_language_function base;

  tree x_cdtor_label;
  tree x_current_class_ptr;
  tree x_current_class_ref;
  tree x_eh_spec_block;
  tree x_in_charge_parm;
  tree x_vtt_parm;
  tree x_return_value;
  tree x_auto_return_pattern;

  BOOL_BITFIELD returns_value : 1;
  BOOL_BITFIELD returns_null : 1;
  BOOL_BITFIELD returns_abnormally : 1;
  BOOL_BITFIELD infinite_loop: 1;
  BOOL_BITFIELD x_in_function_try_handler : 1;
  BOOL_BITFIELD x_in_base_initializer : 1;

  /* True if this function can throw an exception.  */
  BOOL_BITFIELD can_throw : 1;

  BOOL_BITFIELD invalid_constexpr : 1;

  hash_table<named_label_hasher> *x_named_labels;
  cp_binding_level *bindings;
  vec<tree, va_gc> *x_local_names;

  /* Tracking possibly infinite loops.  This is a vec<tree> only because
     vec<bool> doesn't work with gtype.  */
  vec<tree, va_gc> *infinite_loops;

  hash_table<cxx_int_tree_map_hasher> *extern_decl_map;
};

/* The current C++-specific per-function global variables.  */
#define cp_function_chain (cfun->language)

/* In a constructor destructor, the point at which all derived class
   destroying/construction has been done.  I.e., just before a constructor
   returns, or before any base class destroying will be done in a
   destructor.  */
#define cdtor_label cp_function_chain->x_cdtor_label

/* When we're processing a member function, current_class_ptr is the
   PARM_DECL for the `this' pointer.  The current_class_ref is an
   expression for `*this'.  Falls back to the saved scope's slots when
   there is no current function.  */
#define current_class_ptr \
  (*(cfun && cp_function_chain \
     ? &cp_function_chain->x_current_class_ptr \
     : &scope_chain->x_current_class_ptr))
#define current_class_ref \
  (*(cfun && cp_function_chain \
     ? &cp_function_chain->x_current_class_ref \
     : &scope_chain->x_current_class_ref))

/* The EH_SPEC_BLOCK for the exception-specifiers for the current
   function, if any.  */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block

/* The `__in_chrg' parameter for the current function.  Only used for
   constructors and destructors.  */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm

/* The `__vtt_parm' parameter for the current function.  Only used for
   constructors and destructors.  */
#define current_vtt_parm cp_function_chain->x_vtt_parm

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */
#define current_function_returns_value cp_function_chain->returns_value

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */
#define current_function_returns_null cp_function_chain->returns_null

/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.
*/ #define current_function_returns_abnormally \ cp_function_chain->returns_abnormally /* Set to 0 at beginning of a function definition, set to 1 if we see an obvious infinite loop. This can have false positives and false negatives, so it should only be used as a heuristic. */ #define current_function_infinite_loop cp_function_chain->infinite_loop /* Nonzero if we are processing a base initializer. Zero elsewhere. */ #define in_base_initializer cp_function_chain->x_in_base_initializer #define in_function_try_handler cp_function_chain->x_in_function_try_handler /* Expression always returned from function, or error_mark_node otherwise, for use by the automatic named return value optimization. */ #define current_function_return_value \ (cp_function_chain->x_return_value) /* A type involving 'auto' to be used for return type deduction. */ #define current_function_auto_return_pattern \ (cp_function_chain->x_auto_return_pattern) /* True if NAME is the IDENTIFIER_NODE for an overloaded "operator new" or "operator delete". */ #define NEW_DELETE_OPNAME_P(NAME) \ ((NAME) == ansi_opname (NEW_EXPR) \ || (NAME) == ansi_opname (VEC_NEW_EXPR) \ || (NAME) == ansi_opname (DELETE_EXPR) \ || (NAME) == ansi_opname (VEC_DELETE_EXPR)) #define ansi_opname(CODE) \ (operator_name_info[(int) (CODE)].identifier) #define ansi_assopname(CODE) \ (assignment_operator_name_info[(int) (CODE)].identifier) /* TRUE if a tree code represents a statement. */ extern bool statement_code_p[MAX_TREE_CODES]; #define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)] enum languages { lang_c, lang_cplusplus, lang_java }; /* Macros to make error reporting functions' lives easier. */ #define TYPE_LINKAGE_IDENTIFIER(NODE) \ (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE))) #define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE))) #define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE))) /* Nonzero if NODE has no name for linkage purposes. 
*/ #define TYPE_ANONYMOUS_P(NODE) \ (OVERLOAD_TYPE_P (NODE) && anon_aggrname_p (TYPE_LINKAGE_IDENTIFIER (NODE))) /* The _DECL for this _TYPE. */ #define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE))) /* Nonzero if T is a type that could resolve to any kind of concrete type at instantiation time. */ #define WILDCARD_TYPE_P(T) \ (TREE_CODE (T) == TEMPLATE_TYPE_PARM \ || TREE_CODE (T) == TYPENAME_TYPE \ || TREE_CODE (T) == TYPEOF_TYPE \ || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \ || TREE_CODE (T) == DECLTYPE_TYPE) /* Nonzero if T is a class (or struct or union) type. Also nonzero for template type parameters, typename types, and instantiated template template parameters. Keep these checks in ascending code order. */ #define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T)) /* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or union type. */ #define SET_CLASS_TYPE_P(T, VAL) \ (TYPE_LANG_FLAG_5 (T) = (VAL)) /* Nonzero if T is a class type. Zero for template type parameters, typename types, and so forth. */ #define CLASS_TYPE_P(T) \ (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T)) /* Nonzero if T is a class type but not an union. */ #define NON_UNION_CLASS_TYPE_P(T) \ (CLASS_TYPE_P (T) && TREE_CODE (T) != UNION_TYPE) /* Keep these checks in ascending code order. */ #define RECORD_OR_UNION_CODE_P(T) \ ((T) == RECORD_TYPE || (T) == UNION_TYPE) #define OVERLOAD_TYPE_P(T) \ (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE) /* True if this a "Java" type, defined in 'extern "Java"'. */ #define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE) /* True if this type is dependent. This predicate is only valid if TYPE_DEPENDENT_P_VALID is true. */ #define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE) /* True if dependent_type_p has been called for this type, with the result that TYPE_DEPENDENT_P is valid. */ #define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE) /* Nonzero if this type is const-qualified. 
*/ #define CP_TYPE_CONST_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0) /* Nonzero if this type is volatile-qualified. */ #define CP_TYPE_VOLATILE_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0) /* Nonzero if this type is restrict-qualified. */ #define CP_TYPE_RESTRICT_P(NODE) \ ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0) /* Nonzero if this type is const-qualified, but not volatile-qualified. Other qualifiers are ignored. This macro is used to test whether or not it is OK to bind an rvalue to a reference. */ #define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \ ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \ == TYPE_QUAL_CONST) #define FUNCTION_ARG_CHAIN(NODE) \ TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE))) /* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES which refers to a user-written parameter. */ #define FUNCTION_FIRST_USER_PARMTYPE(NODE) \ skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE))) /* Similarly, but for DECL_ARGUMENTS. */ #define FUNCTION_FIRST_USER_PARM(NODE) \ skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE)) /* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and ambiguity issues. */ #define DERIVED_FROM_P(PARENT, TYPE) \ (lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE) /* Gives the visibility specification for a class type. */ #define CLASSTYPE_VISIBILITY(TYPE) \ DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE)) #define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \ DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE)) struct GTY (()) tree_pair_s { tree purpose; tree value; }; typedef tree_pair_s *tree_pair_p; /* This is a few header flags for 'struct lang_type'. Actually, all but the first are used only for lang_type_class; they are put in this structure to save space. 
*/
struct GTY(()) lang_type_header {
  /* Discriminator for the union in `struct lang_type': nonzero when the
     payload is a lang_type_class, zero for a lang_type_ptrmem (see the
     GTY desc on lang_type below).  */
  BOOL_BITFIELD is_lang_type_class : 1;

  BOOL_BITFIELD has_type_conversion : 1;
  BOOL_BITFIELD has_copy_ctor : 1;
  BOOL_BITFIELD has_default_ctor : 1;
  BOOL_BITFIELD const_needs_init : 1;
  BOOL_BITFIELD ref_needs_init : 1;
  BOOL_BITFIELD has_const_copy_assign : 1;
  BOOL_BITFIELD spare : 1;
};

/* This structure provides additional information above and beyond what is
   provided in the ordinary tree_type.  In the past, we used it for the
   types of class types, template parameter types, typename types, and so
   forth.  However, there can be many (tens to hundreds of thousands) of
   template parameter types in a compilation, and there's no need for this
   additional information in that case.  Therefore, we now use this data
   structure only for class types.

   In the past, it was thought that there would be relatively few class
   types.  However, in the presence of heavy use of templates, many
   (i.e., thousands) of classes can easily be generated.  Therefore, we
   should endeavor to keep the size of this structure to a minimum.
*/
struct GTY(()) lang_type_class {
  struct lang_type_header h;

  unsigned char align;

  /* One-bit (and two two-bit) class properties; most are read and written
     through the CLASSTYPE_* / TYPE_* accessor macros defined below.  */
  unsigned has_mutable : 1;
  unsigned com_interface : 1;
  unsigned non_pod_class : 1;
  unsigned nearly_empty_p : 1;
  unsigned user_align : 1;
  unsigned has_copy_assign : 1;
  unsigned has_new : 1;
  unsigned has_array_new : 1;

  unsigned gets_delete : 2;
  unsigned interface_only : 1;
  unsigned interface_unknown : 1;
  unsigned contains_empty_class_p : 1;
  unsigned anon_aggr : 1;
  unsigned non_zero_init : 1;
  unsigned empty_p : 1;

  unsigned vec_new_uses_cookie : 1;
  unsigned declared_class : 1;
  unsigned diamond_shaped : 1;
  unsigned repeated_base : 1;
  unsigned being_defined : 1;
  unsigned java_interface : 1;
  unsigned debug_requested : 1;
  unsigned fields_readonly : 1;

  unsigned use_template : 2;
  unsigned ptrmemfunc_flag : 1;
  unsigned was_anonymous : 1;
  unsigned lazy_default_ctor : 1;
  unsigned lazy_copy_ctor : 1;
  unsigned lazy_copy_assign : 1;
  unsigned lazy_destructor : 1;

  unsigned has_const_copy_ctor : 1;
  unsigned has_complex_copy_ctor : 1;
  unsigned has_complex_copy_assign : 1;
  unsigned non_aggregate : 1;
  unsigned has_complex_dflt : 1;
  unsigned has_list_ctor : 1;
  unsigned non_std_layout : 1;
  unsigned is_literal : 1;

  unsigned lazy_move_ctor : 1;
  unsigned lazy_move_assign : 1;
  unsigned has_complex_move_ctor : 1;
  unsigned has_complex_move_assign : 1;
  unsigned has_constexpr_ctor : 1;

  /* When adding a flag here, consider whether or not it ought to apply to
     a template instance if it applies to the template.  If so, make sure
     to copy it in instantiate_class_template!  */

  /* There are some bits left to fill out a 32-bit word.  Keep track of
     this by updating the size of this bitfield whenever you add or remove
     a flag.  */
  unsigned dummy : 3;

  tree primary_base;
  vec<tree_pair_s, va_gc> *vcall_indices;
  tree vtables;
  tree typeinfo_var;
  vec<tree, va_gc> *vbases;
  binding_table nested_udts;
  tree as_base;
  vec<tree, va_gc> *pure_virtuals;
  tree friend_classes;
  vec<tree, va_gc> * GTY((reorder ("resort_type_method_vec"))) methods;
  tree key_method;
  tree decl_list;
  tree template_info;
  tree befriending_classes;

  /* In a RECORD_TYPE, information specific to Objective-C++, such as a
     list of adopted protocols or a pointer to a corresponding @interface.
     See objc/objc-act.h for details.  */
  tree objc_info;

  /* sorted_fields is sorted based on a pointer, so we need to be able to
     resort it if pointers get rearranged.  */
  struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields")))
    sorted_fields;

  /* FIXME reuse another field?  */
  tree lambda_expr;
};

/* Payload of `struct lang_type' for pointer-to-member types.  */
struct GTY(()) lang_type_ptrmem {
  struct lang_type_header h;
  tree record;
};

/* Discriminated union of the two lang_type payloads; the header's
   is_lang_type_class bit selects which member is live.  */
struct GTY(()) lang_type {
  union lang_type_u
  {
    struct lang_type_header GTY((skip (""))) h;
    struct lang_type_class GTY((tag ("1"))) c;
    struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
  } GTY((desc ("%h.h.is_lang_type_class"))) u;
};

#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)

/* Checked accessors: abort via lang_check_failed if the lang_type payload
   is not of the expected kind.  */
#define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \
({  struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
    if (! lt->u.h.is_lang_type_class) \
      lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
    &lt->u.c; })

#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \
({  struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
    if (lt->u.h.is_lang_type_class) \
      lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
    &lt->u.ptrmem; })

#else

#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)

#endif /* ENABLE_TREE_CHECKING */

/* Nonzero for _CLASSTYPE means that operator delete is defined.
*/ #define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete) #define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1) /* Nonzero if `new NODE[x]' should cause the allocation of extra storage to indicate how many array elements are in use. */ #define TYPE_VEC_NEW_USES_COOKIE(NODE) \ (CLASS_TYPE_P (NODE) \ && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie) /* Nonzero means that this _CLASSTYPE node defines ways of converting itself to other types. */ #define TYPE_HAS_CONVERSION(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion) /* Nonzero means that NODE (a class type) has a default constructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor) /* Nonzero means that NODE (a class type) has a copy constructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_COPY_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor) /* Nonzero means that NODE (a class type) has a move constructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor) /* Nonzero means that NODE (a class type) has an assignment operator -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign) /* Nonzero means that NODE (a class type) has an assignment operator -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign) /* Nonzero means that NODE (a class type) has a destructor -- but that it has not yet been declared. */ #define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor) /* Nonzero means that NODE (a class type) is final */ #define CLASSTYPE_FINAL(NODE) \ TYPE_FINAL_P (NODE) /* Nonzero means that this _CLASSTYPE node overloads operator=(X&). 
*/
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)

/* True iff the class type NODE has an "operator =" whose parameter is of
   type "const X&".  */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_copy_assign)

/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor.  */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)

/* Nonzero if this class has an X(initializer_list<T>) constructor.  */
#define TYPE_HAS_LIST_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)

/* Nonzero if this class has a constexpr constructor other than a copy/move
   constructor.  Note that a class can have constexpr constructors for
   static initialization even if it isn't a literal class.  */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)

/* Nonzero if this class defines an overloaded operator new.  (An operator
   new [] doesn't count.)  */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_new)

/* Nonzero if this class defines an overloaded operator new[].  */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)

/* Nonzero means that this type is being defined.  I.e., the left brace
   starting the definition of this type has been seen.  */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)

/* Nonzero means that this type is either complete or being defined, so we
   can do lookup in it.  */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
  (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))

/* Mark bits for repeated base checks.  */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))

/* Nonzero if the class NODE has multiple paths to the same (virtual) base
   object.
*/ #define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \ (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped) /* Nonzero if the class NODE has multiple instances of the same base type. */ #define CLASSTYPE_REPEATED_BASE_P(NODE) \ (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base) /* The member function with which the vtable will be emitted: the first noninline non-pure-virtual member function. NULL_TREE if there is no key function or if this is a class template */ #define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method) /* Vector member functions defined in this class. Each element is either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD. All functions with the same name end up in the same slot. The first two elements are for constructors, and destructors, respectively. All template conversion operators to innermost template dependent types are overloaded on the next slot, if they exist. Note, the names for these functions will not all be the same. The non-template conversion operators & templated conversions to non-innermost template types are next, followed by ordinary member functions. There may be empty entries at the end of the vector. The conversion operators are unsorted. The ordinary member functions are sorted, once the class is complete. */ #define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods) /* For class templates, this is a TREE_LIST of all member data, functions, types, and friends in the order of declaration. The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend, and the RECORD_TYPE for the class template otherwise. */ #define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list) /* The slot in the CLASSTYPE_METHOD_VEC where constructors go. */ #define CLASSTYPE_CONSTRUCTOR_SLOT 0 /* The slot in the CLASSTYPE_METHOD_VEC where destructors go. */ #define CLASSTYPE_DESTRUCTOR_SLOT 1 /* The first slot in the CLASSTYPE_METHOD_VEC where conversion operators can appear. 
*/ #define CLASSTYPE_FIRST_CONVERSION_SLOT 2 /* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These are the constructors that take an in-charge parameter. */ #define CLASSTYPE_CONSTRUCTORS(NODE) \ ((*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_CONSTRUCTOR_SLOT]) /* A FUNCTION_DECL for the destructor for NODE. These are the destructors that take an in-charge parameter. If CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL until the destructor is created with lazily_declare_fn. */ #define CLASSTYPE_DESTRUCTORS(NODE) \ (CLASSTYPE_METHOD_VEC (NODE) \ ? (*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_DESTRUCTOR_SLOT] \ : NULL_TREE) /* A dictionary of the nested user-defined-types (class-types, or enums) found within this class. This table includes nested member class templates. */ #define CLASSTYPE_NESTED_UTDS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->nested_udts) /* Nonzero if NODE has a primary base class, i.e., a base class with which it shares the virtual function table pointer. */ #define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \ (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE) /* If non-NULL, this is the binfo for the primary base class, i.e., the base class which contains the virtual function table pointer for this class. */ #define CLASSTYPE_PRIMARY_BINFO(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->primary_base) /* A vector of BINFOs for the direct and indirect virtual base classes that this type uses in a post-order depth-first left-to-right order. (In other words, these bases appear in the order that they should be initialized.) */ #define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases) /* The type corresponding to NODE when NODE is used as a base class, i.e., NODE without virtual base classes or tail padding. */ #define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base) /* True iff NODE is the CLASSTYPE_AS_BASE version of some type. 
*/ #define IS_FAKE_BASE_TYPE(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \ && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE)) /* These are the size and alignment of the type without its virtual base classes, for when we use this type as a base itself. */ #define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE)) #define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE)) /* The alignment of NODE, without its virtual bases, in bytes. */ #define CLASSTYPE_ALIGN_UNIT(NODE) \ (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT) /* True if this a Java interface type, declared with '__attribute__ ((java_interface))'. */ #define TYPE_JAVA_INTERFACE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->java_interface) /* A vec<tree> of virtual functions which cannot be inherited by derived classes. When deriving from this type, the derived class must provide its own definition for each of these functions. */ #define CLASSTYPE_PURE_VIRTUALS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals) /* Nonzero means that this type is an abstract class type. */ #define ABSTRACT_CLASS_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE)) /* Nonzero means that this type has an X() constructor. */ #define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor) /* Nonzero means that this type contains a mutable member. */ #define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable) #define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE)) /* Nonzero means that this class type is not POD for the purpose of layout (as defined in the ABI). This is different from the language's POD. 
*/ #define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class) /* Nonzero means that this class type is a non-standard-layout class. */ #define CLASSTYPE_NON_STD_LAYOUT(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout) /* Nonzero means that this class contains pod types whose default initialization is not a zero initialization (namely, pointers to data members). */ #define CLASSTYPE_NON_ZERO_INIT_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init) /* Nonzero if this class is "empty" in the sense of the C++ ABI. */ #define CLASSTYPE_EMPTY_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->empty_p) /* Nonzero if this class is "nearly empty", i.e., contains only a virtual function table pointer. */ #define CLASSTYPE_NEARLY_EMPTY_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p) /* Nonzero if this class contains an empty subobject. */ #define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p) /* A list of class types of which this type is a friend. The TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the case of a template friend. */ #define CLASSTYPE_FRIEND_CLASSES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes) /* A list of the classes which grant friendship to this class. */ #define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes) /* The associated LAMBDA_EXPR that made this class. */ #define CLASSTYPE_LAMBDA_EXPR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr) /* The extra mangling scope for this closure type. */ #define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \ (LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE))) /* Say whether this node was declared as a "class" or a "struct". */ #define CLASSTYPE_DECLARED_CLASS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->declared_class) /* Nonzero if this class has const members which have no specified initialization. 
*/ #define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \ (TYPE_LANG_SPECIFIC (NODE) \ ? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0) #define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE)) /* Nonzero if this class has ref members which have no specified initialization. */ #define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \ (TYPE_LANG_SPECIFIC (NODE) \ ? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0) #define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \ (LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE)) /* Nonzero if this class is included from a header file which employs `#pragma interface', and it is not included in its implementation file. */ #define CLASSTYPE_INTERFACE_ONLY(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_only) /* True if we have already determined whether or not vtables, VTTs, typeinfo, and other similar per-class data should be emitted in this translation unit. This flag does not indicate whether or not these items should be emitted; it only indicates that we know one way or the other. */ #define CLASSTYPE_INTERFACE_KNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0) /* The opposite of CLASSTYPE_INTERFACE_KNOWN. */ #define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown) #define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X)) #define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1) #define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0) /* Nonzero if a _DECL node requires us to output debug info for this class. */ #define CLASSTYPE_DEBUG_REQUESTED(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested) /* Additional macros for inheritance information. */ /* Nonzero means that this class is on a path leading to a new vtable. 
*/ #define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE) /* Nonzero means B (a BINFO) has its own vtable. Any copies will not have this flag set. */ #define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B)) /* Compare a BINFO_TYPE with another type for equality. For a binfo, this is functionally equivalent to using same_type_p, but measurably faster. At least one of the arguments must be a BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If BINFO_TYPE(T) ever stops being the main variant of the class the binfo is for, this macro must change. */ #define SAME_BINFO_TYPE_P(A, B) ((A) == (B)) /* Any subobject that needs a new vtable must have a vptr and must not be a non-virtual primary base (since it would then use the vtable from a derived class and never become non-primary.) */ #define SET_BINFO_NEW_VTABLE_MARKED(B) \ (BINFO_NEW_VTABLE_MARKED (B) = 1, \ gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \ gcc_assert (TYPE_VFIELD (BINFO_TYPE (B)))) /* Nonzero if this binfo is for a dependent base - one that should not be searched. */ #define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE) /* Nonzero if this binfo has lost its primary base binfo (because that is a nearly-empty virtual base that has been taken by some other base in the complete hierarchy. */ #define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE) /* Nonzero if this BINFO is a primary base class. */ #define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE) /* Used by various search routines. */ #define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE) /* A vec<tree_pair_s> of the vcall indices associated with the class NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual function. The VALUE is the index into the virtual table where the vcall offset for that function is stored, when NODE is a virtual base. */ #define CLASSTYPE_VCALL_INDICES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices) /* The various vtables for the class NODE. 
The primary vtable will be first, followed by the construction vtables and VTT, if any. */ #define CLASSTYPE_VTABLES(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->vtables) /* The std::type_info variable representing this class, or NULL if no such variable has been created. This field is only set for the TYPE_MAIN_VARIANT of the class. */ #define CLASSTYPE_TYPEINFO_VAR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var) /* Accessor macros for the BINFO_VIRTUALS list. */ /* The number of bytes by which to adjust the `this' pointer when calling this virtual function. Subtract this value from the this pointer. Always non-NULL, might be constant zero though. */ #define BV_DELTA(NODE) (TREE_PURPOSE (NODE)) /* If non-NULL, the vtable index at which to find the vcall offset when calling this virtual function. Add the value at that vtable index to the this pointer. */ #define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE)) /* The function to call. */ #define BV_FN(NODE) (TREE_VALUE (NODE)) /* Whether or not this entry is for a lost primary virtual base. */ #define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE will be NULL_TREE to indicate a throw specification of `()', or no exceptions allowed. For a noexcept specification, TREE_VALUE is NULL_TREE and TREE_PURPOSE is the constant-expression. For a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT (for templates) or an OVERLOAD list of functions (for implicitly declared functions). */ #define TYPE_RAISES_EXCEPTIONS(NODE) \ TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()' or noexcept(true). */ #define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE)) /* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. 
This is the case for things declared noexcept(true) and, with
   -fnothrow-opt, for throw() functions.  */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)

/* The binding level associated with the namespace.  */
#define NAMESPACE_LEVEL(NODE) \
  (LANG_DECL_NS_CHECK (NODE)->level)

/* Flags shared by all forms of DECL_LANG_SPECIFIC.
   Some of the flags live here only to make lang_decl_min/fn smaller.  Do
   not make this struct larger than 32 bits; instead, make sel smaller.  */

struct GTY(()) lang_decl_base {
  /* Discriminates the lang_decl_u union below; see the GTY tags on
     struct lang_decl (0 = min, 1 = fn, 2 = ns, 3 = parm).  Larger than
     necessary for faster access.  */
  unsigned selector : 16;
  /* The language linkage of the entity; read by DECL_LANGUAGE.  */
  ENUM_BITFIELD(languages) language : 4;
  /* NOTE(review): presumably backs DECL_USE_TEMPLATE (referenced but not
     defined in this chunk) -- confirm.  */
  unsigned use_template : 2;
  unsigned not_really_extern : 1;	   /* var or fn */
  unsigned initialized_in_class : 1;	   /* var or fn */
  unsigned repo_available_p : 1;	   /* var or fn */
  unsigned threadprivate_or_deleted_p : 1; /* var or fn */
  unsigned anticipated_p : 1;		   /* fn, type or template */
  /* anticipated_p reused as DECL_OMP_PRIVATIZED_MEMBER in var */
  unsigned friend_or_tls : 1;		   /* var, fn, type or template */
  unsigned template_conv_p : 1;		   /* var or template */
  unsigned odr_used : 1;		   /* var or fn */
  /* Discriminates lang_decl_u2: 0 = access, 1 = discriminator.  */
  unsigned u2sel : 1;
  unsigned concept_p : 1;                  /* applies to vars and functions */
  /* 0 spare bits */
};

/* True for DECL codes which have template info and access.  */
#define LANG_DECL_HAS_MIN(NODE)			\
  (VAR_OR_FUNCTION_DECL_P (NODE)		\
   || TREE_CODE (NODE) == FIELD_DECL		\
   || TREE_CODE (NODE) == CONST_DECL		\
   || TREE_CODE (NODE) == TYPE_DECL		\
   || TREE_CODE (NODE) == TEMPLATE_DECL		\
   || TREE_CODE (NODE) == USING_DECL)

/* DECL_LANG_SPECIFIC for the above codes.  */

struct GTY(()) lang_decl_min {
  struct lang_decl_base base;

  /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
     THUNK_ALIAS.
     In a FUNCTION_DECL for which DECL_THUNK_P does not hold, VAR_DECL,
     TYPE_DECL, or TEMPLATE_DECL, this is DECL_TEMPLATE_INFO.  */
  tree template_info;

  /* Which member is live is selected by base.u2sel (see the GTY desc).  */
  union lang_decl_u2 {
    /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
       THUNK_VIRTUAL_OFFSET.
       Otherwise this is DECL_ACCESS.  */
    tree GTY ((tag ("0"))) access;

    /* For VAR_DECL in function, this is DECL_DISCRIMINATOR.  */
    int GTY ((tag ("1"))) discriminator;
  } GTY ((desc ("%0.u.base.u2sel"))) u2;
};

/* Additional DECL_LANG_SPECIFIC information for functions.  */

struct GTY(()) lang_decl_fn {
  struct lang_decl_min min;

  /* In an overloaded operator, this is the value of
     DECL_OVERLOADED_OPERATOR_P.  */
  ENUM_BITFIELD (tree_code) operator_code : 16;

  unsigned global_ctor_p : 1;
  unsigned global_dtor_p : 1;
  unsigned assignment_operator_p : 1;	/* DECL_ASSIGNMENT_OPERATOR_P */
  unsigned static_function : 1;		/* DECL_STATIC_FUNCTION_P */
  unsigned pure_virtual : 1;		/* DECL_PURE_VIRTUAL_P */
  unsigned defaulted_p : 1;
  unsigned has_in_charge_parm_p : 1;	/* DECL_HAS_IN_CHARGE_PARM_P */
  unsigned has_vtt_parm_p : 1;		/* DECL_HAS_VTT_PARM_P */
  unsigned pending_inline_p : 1;	/* DECL_PENDING_INLINE_P */
  unsigned nonconverting : 1;		/* DECL_NONCONVERTING_P */
  unsigned thunk_p : 1;			/* DECL_THUNK_P */
  unsigned this_thunk_p : 1;		/* DECL_THIS_THUNK_P */
  unsigned hidden_friend_p : 1;
  unsigned omp_declare_reduction_p : 1;
  /* 2 spare bits on 32-bit hosts, 34 on 64-bit hosts.  */

  /* For a non-thunk function decl, this is a tree list of
     friendly classes (DECL_BEFRIENDING_CLASSES).  For a thunk function
     decl, it is the thunked to function decl.  */
  tree befriending_classes;

  /* For a non-virtual FUNCTION_DECL, this is
     DECL_FRIEND_CONTEXT.  For a virtual FUNCTION_DECL for which
     DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS.  Both
     this pointer and result pointer adjusting thunks are chained here.
     This pointer thunks to return pointer thunks will be chained on the
     return pointer thunk.  */
  tree context;

  /* Which member is live is selected by thunk_p (see the GTY desc).  */
  union lang_decl_u5
  {
    /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
       DECL_CLONED_FUNCTION.  */
    tree GTY ((tag ("0"))) cloned_function;

    /* In a FUNCTION_DECL for which THUNK_P holds this is the
       THUNK_FIXED_OFFSET.  */
    HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
  } GTY ((desc ("%1.thunk_p"))) u5;

  /* Which member is live is selected by pending_inline_p.  */
  union lang_decl_u3
  {
    /* If DECL_PENDING_INLINE_P, this is DECL_PENDING_INLINE_INFO.  */
    struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
    struct language_function * GTY ((tag ("0")))
      saved_language_function;
  } GTY ((desc ("%1.pending_inline_p"))) u;

};

/* DECL_LANG_SPECIFIC for namespaces.
*/

struct GTY(()) lang_decl_ns {
  struct lang_decl_base base;
  /* The binding level of this namespace; see NAMESPACE_LEVEL.  */
  cp_binding_level *level;
  /* The list of using namespace directives; see DECL_NAMESPACE_USING.  */
  tree ns_using;
  /* The users of this namespace; see DECL_NAMESPACE_USERS.  */
  tree ns_users;
};

/* DECL_LANG_SPECIFIC for parameters.  */

struct GTY(()) lang_decl_parm {
  struct lang_decl_base base;
  /* See DECL_PARM_LEVEL: nesting level of the function declarator this
     parameter belongs to, starting at 1.  */
  int level;
  /* See DECL_PARM_INDEX: 1-based position of a user-declared parameter;
     0 for artificial parameters.  */
  int index;
};

/* DECL_LANG_SPECIFIC for all decl nodes (the variants above, selected
   by base.selector).  It would be nice to just make this a union rather
   than a struct containing a union as its only field, but tree.h
   declares it as a struct.  */

struct GTY(()) lang_decl {
  union GTY((desc ("%h.base.selector"))) lang_decl_u {
    struct lang_decl_base GTY ((default)) base;
    struct lang_decl_min GTY((tag ("0"))) min;
    struct lang_decl_fn GTY ((tag ("1"))) fn;
    struct lang_decl_ns GTY((tag ("2"))) ns;
    struct lang_decl_parm GTY((tag ("3"))) parm;
  } u;
};

/* Looks through a template (if present) to find what it declares.  */
#define STRIP_TEMPLATE(NODE) \
  (TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)

#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)

/* Checked accessor for the lang_decl_min variant: calls
   lang_check_failed if NODE's DECL_LANG_SPECIFIC cannot carry it.  */
#define LANG_DECL_MIN_CHECK(NODE) __extension__			\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (!LANG_DECL_HAS_MIN (NODE))				\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.min; })

/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
   template, not just on a FUNCTION_DECL.  So when looking for things in
   lang_decl_fn, look down through a TEMPLATE_DECL into its result.
*/ #define LANG_DECL_FN_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \ if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != 1) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.fn; }) #define LANG_DECL_NS_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != 2) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.ns; }) #define LANG_DECL_PARM_CHECK(NODE) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (TREE_CODE (NODE) != PARM_DECL) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.parm; }) #define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \ ({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \ if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \ &lt->u.min.u2; }) #else #define LANG_DECL_MIN_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.min) #define LANG_DECL_FN_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn) #define LANG_DECL_NS_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.ns) #define LANG_DECL_PARM_CHECK(NODE) \ (&DECL_LANG_SPECIFIC (NODE)->u.parm) #define LANG_DECL_U2_CHECK(NODE, TF) \ (&DECL_LANG_SPECIFIC (NODE)->u.min.u2) #endif /* ENABLE_TREE_CHECKING */ /* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the declaration. Some entities (like a member function in a local class, or a local variable) do not have linkage at all, and this macro should not be used in those cases. Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was created by language-independent code, and has C linkage. Most VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */ #define DECL_LANGUAGE(NODE) \ (DECL_LANG_SPECIFIC (NODE) \ ? 
DECL_LANG_SPECIFIC (NODE)->u.base.language \ : (TREE_CODE (NODE) == FUNCTION_DECL \ ? lang_c : lang_cplusplus)) /* Set the language linkage for NODE to LANGUAGE. */ #define SET_DECL_LANGUAGE(NODE, LANGUAGE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE)) /* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function is a constructor. */ #define DECL_CONSTRUCTOR_P(NODE) \ DECL_CXX_CONSTRUCTOR_P (STRIP_TEMPLATE (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete object. */ #define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == complete_ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base object. */ #define DECL_BASE_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == base_ctor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the specialized in-charge constructor or the specialized not-in-charge constructor. */ #define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \ (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \ && !DECL_CLONED_FUNCTION_P (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */ #define DECL_COPY_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0) /* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */ #define DECL_MOVE_CONSTRUCTOR_P(NODE) \ (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE)) /* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL) is a destructor. */ #define DECL_DESTRUCTOR_P(NODE) \ DECL_CXX_DESTRUCTOR_P (STRIP_TEMPLATE (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the specialized in-charge constructor, in-charge deleting constructor, or the base destructor. */ #define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \ (DECL_DECLARES_FUNCTION_P (NODE) && DECL_DESTRUCTOR_P (NODE) \ && !DECL_CLONED_FUNCTION_P (NODE)) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete object. 
*/ #define DECL_COMPLETE_DESTRUCTOR_P(NODE) \ (DECL_DESTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == complete_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base object. */ #define DECL_BASE_DESTRUCTOR_P(NODE) \ (DECL_DESTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == base_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete object that deletes the object after it has been destroyed. */ #define DECL_DELETING_DESTRUCTOR_P(NODE) \ (DECL_DESTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == deleting_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or destructor. */ #define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true)) /* If DECL_CLONED_FUNCTION_P holds, this is the function that was cloned. */ #define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false)) /* Perform an action for each clone of FN, if FN is a function with clones. This macro should be used like: FOR_EACH_CLONE (clone, fn) { ... } */ #define FOR_EACH_CLONE(CLONE, FN) \ if (!(TREE_CODE (FN) == FUNCTION_DECL \ && (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \ || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))))\ ; \ else \ for (CLONE = DECL_CHAIN (FN); \ CLONE && DECL_CLONED_FUNCTION_P (CLONE); \ CLONE = DECL_CHAIN (CLONE)) /* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */ #define DECL_DISCRIMINATOR_P(NODE) \ (VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE)) /* Discriminator for name mangling. */ #define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator) /* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */ #define DECL_DISCRIMINATOR_SET_P(NODE) \ (DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1) /* The index of a user-declared parameter in its function, starting at 1. All artificial parameters will have index 0. 
*/ #define DECL_PARM_INDEX(NODE) \ (LANG_DECL_PARM_CHECK (NODE)->index) /* The level of a user-declared parameter in its function, starting at 1. A parameter of the function will have level 1; a parameter of the first nested function declarator (i.e. t in void f (void (*p)(T t))) will have level 2. */ #define DECL_PARM_LEVEL(NODE) \ (LANG_DECL_PARM_CHECK (NODE)->level) /* Nonzero if the VTT parm has been added to NODE. */ #define DECL_HAS_VTT_PARM_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p) /* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is required. */ #define DECL_NEEDS_VTT_PARM_P(NODE) \ (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \ && (DECL_BASE_CONSTRUCTOR_P (NODE) \ || DECL_BASE_DESTRUCTOR_P (NODE))) /* Nonzero if NODE is a user-defined conversion operator. */ #define DECL_CONV_FN_P(NODE) \ (DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE))) /* If FN is a conversion operator, the type to which it converts. Otherwise, NULL_TREE. */ #define DECL_CONV_FN_TYPE(FN) \ (DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE) /* Nonzero if NODE, which is a TEMPLATE_DECL, is a template conversion operator to a type dependent on the innermost template args. */ #define DECL_TEMPLATE_CONV_FN_P(NODE) \ (DECL_LANG_SPECIFIC (TEMPLATE_DECL_CHECK (NODE))->u.base.template_conv_p) /* Nonzero if NODE, a static data member, was declared in its class as an array of unknown bound. */ #define VAR_HAD_UNKNOWN_BOUND(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ ? DECL_LANG_SPECIFIC (NODE)->u.base.template_conv_p \ : false) #define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.template_conv_p = true) /* Set the overloaded operator code for NODE to CODE. */ #define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \ (LANG_DECL_FN_CHECK (NODE)->operator_code = (CODE)) /* If NODE is an overloaded operator, then this returns the TREE_CODE associated with the overloaded operator. 
DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine whether or not NODE is an assignment operator. If NODE is not an overloaded operator, ERROR_MARK is returned. Since the numerical value of ERROR_MARK is zero, this macro can be used as a predicate to test whether or not NODE is an overloaded operator. */ #define DECL_OVERLOADED_OPERATOR_P(NODE) \ (IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \ ? LANG_DECL_FN_CHECK (NODE)->operator_code : ERROR_MARK) /* Nonzero if NODE is an assignment operator (including += and such). */ #define DECL_ASSIGNMENT_OPERATOR_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->assignment_operator_p) /* For FUNCTION_DECLs: nonzero means that this function is a constructor or a destructor with an extra in-charge parameter to control whether or not virtual bases are constructed. */ #define DECL_HAS_IN_CHARGE_PARM_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p) /* Nonzero if DECL is a declaration of __builtin_constant_p. */ #define DECL_IS_BUILTIN_CONSTANT_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \ && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P) /* Nonzero for _DECL means that this decl appears in (or will appear in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for detecting circularity in case members are multiply defined. In the case of a VAR_DECL, it is also used to determine how program storage should be allocated. */ #define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE)) /* Nonzero for a VAR_DECL means that the variable's initialization (if any) has been processed. (In general, DECL_INITIALIZED_P is !DECL_EXTERNAL, but static data members may be initialized even if not defined.) */ #define DECL_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL iff an explicit initializer was provided or a non-trivial constructor is called. 
*/ #define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL that was initialized with a constant-expression. */ #define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \ (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE))) /* Nonzero if the DECL was initialized in the class definition itself, rather than outside the class. This is used for both static member VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */ #define DECL_INITIALIZED_IN_CLASS_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ ->u.base.initialized_in_class) /* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr]. Only available for decls with DECL_LANG_SPECIFIC. */ #define DECL_ODR_USED(DECL) \ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \ ->u.base.odr_used) /* Nonzero for DECL means that this decl is just a friend declaration, and should not be added to the list of members for this class. */ #define DECL_FRIEND_P(NODE) \ (DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \ ->u.base.friend_or_tls) /* Nonzero if the thread-local variable was declared with __thread as opposed to thread_local. */ #define DECL_GNU_TLS_P(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \ && DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls) #define SET_DECL_GNU_TLS_P(NODE) \ (retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \ DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true) /* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */ #define DECL_BEFRIENDING_CLASSES(NODE) \ (LANG_DECL_FN_CHECK (NODE)->befriending_classes) /* Nonzero for FUNCTION_DECL means that this decl is a static member function. */ #define DECL_STATIC_FUNCTION_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->static_function) /* Nonzero for FUNCTION_DECL means that this decl is a non-static member function. 
*/ #define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \ (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE) /* Nonzero for FUNCTION_DECL means that this decl is a member function (static or non-static). */ #define DECL_FUNCTION_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE)) /* Nonzero for FUNCTION_DECL means that this member function has `this' as const X *const. */ #define DECL_CONST_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for FUNCTION_DECL means that this member function has `this' as volatile X *const. */ #define DECL_VOLATILE_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for a DECL means that this member is a non-static member. */ #define DECL_NONSTATIC_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ || TREE_CODE (NODE) == FIELD_DECL) /* Nonzero for _DECL means that this member object type is mutable. */ #define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE)) /* Nonzero for _DECL means that this constructor or conversion function is non-converting. */ #define DECL_NONCONVERTING_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->nonconverting) /* Nonzero for FUNCTION_DECL means that this member function is a pure virtual function. */ #define DECL_PURE_VIRTUAL_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->pure_virtual) /* True (in a FUNCTION_DECL) if NODE is a virtual function that is an invalid overrider for a function from a base class. Once we have complained about an invalid overrider we avoid complaining about it again. */ #define DECL_INVALID_OVERRIDER_P(NODE) \ (DECL_LANG_FLAG_4 (NODE)) /* True (in a FUNCTION_DECL) if NODE is a function declared with an override virt-specifier */ #define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE)) /* The thunks associated with NODE, a FUNCTION_DECL. 
*/ #define DECL_THUNKS(NODE) \ (DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE) /* Set DECL_THUNKS. */ #define SET_DECL_THUNKS(NODE,THUNKS) \ (LANG_DECL_FN_CHECK (NODE)->context = (THUNKS)) /* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this is the base it inherits from. */ #define DECL_INHERITED_CTOR_BASE(NODE) \ (DECL_CONSTRUCTOR_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE) /* Set the inherited base. */ #define SET_DECL_INHERITED_CTOR_BASE(NODE,INH) \ (LANG_DECL_FN_CHECK (NODE)->context = (INH)) /* Nonzero if NODE is a thunk, rather than an ordinary function. */ #define DECL_THUNK_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_LANG_SPECIFIC (NODE) \ && LANG_DECL_FN_CHECK (NODE)->thunk_p) /* Set DECL_THUNK_P for node. */ #define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \ (LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \ LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING)) /* Nonzero if NODE is a this pointer adjusting thunk. */ #define DECL_THIS_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p) /* Nonzero if NODE is a result pointer adjusting thunk. */ #define DECL_RESULT_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p) /* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */ #define DECL_NON_THUNK_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE)) /* Nonzero if NODE is `extern "C"'. */ #define DECL_EXTERN_C_P(NODE) \ (DECL_LANGUAGE (NODE) == lang_c) /* Nonzero if NODE is an `extern "C"' function. */ #define DECL_EXTERN_C_FUNCTION_P(NODE) \ (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE)) /* True iff DECL is an entity with vague linkage whose definition is available in this translation unit. */ #define DECL_REPO_AVAILABLE_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p) /* True if DECL is declared 'constexpr'. 
*/ #define DECL_DECLARED_CONSTEXPR_P(DECL) \ DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL))) // True if NODE was declared as 'concept'. The flag implies that the // declaration is constexpr, that the declaration cannot be specialized or // refined, and that the result type must be convertible to bool. #define DECL_DECLARED_CONCEPT_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.concept_p) /* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a template function. */ #define DECL_PRETTY_FUNCTION_P(NODE) \ (DECL_NAME (NODE) \ && !strcmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__PRETTY_FUNCTION__")) /* Nonzero if the variable was declared to be thread-local. We need a special C++ version of this test because the middle-end DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for templates. */ #define CP_DECL_THREAD_LOCAL_P(NODE) \ (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))) /* The _TYPE context in which this _DECL appears. This field holds the class where a virtual function instance is actually defined. */ #define DECL_CLASS_CONTEXT(NODE) \ (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE) /* For a non-member friend function, the class (if any) in which this friend was defined. For example, given: struct S { friend void f (); }; the DECL_FRIEND_CONTEXT for `f' will be `S'. */ #define DECL_FRIEND_CONTEXT(NODE) \ ((DECL_DECLARES_FUNCTION_P (NODE) \ && DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \ ? LANG_DECL_FN_CHECK (NODE)->context \ : NULL_TREE) /* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */ #define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \ (LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT)) #define CP_DECL_CONTEXT(NODE) \ (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace) #define CP_TYPE_CONTEXT(NODE) \ (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace) #define FROB_CONTEXT(NODE) \ ((NODE) == global_namespace ? 
DECL_CONTEXT (NODE) : (NODE)) /* 1 iff NODE has namespace scope, including the global namespace. */ #define DECL_NAMESPACE_SCOPE_P(NODE) \ (!DECL_TEMPLATE_PARM_P (NODE) \ && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL) #define TYPE_NAMESPACE_SCOPE_P(NODE) \ (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL) #define NAMESPACE_SCOPE_P(NODE) \ ((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \ || (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE))) /* 1 iff NODE is a class member. */ #define DECL_CLASS_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE))) #define TYPE_CLASS_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE))) /* 1 iff NODE is function-local. */ #define DECL_FUNCTION_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) \ && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL) #define TYPE_FUNCTION_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL) /* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for both the primary typeinfo object and the associated NTBS name. */ #define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)) /* 1 iff VAR_DECL node NODE is virtual table or VTT. */ #define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE)) /* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */ #define FUNCTION_REF_QUALIFIED(NODE) \ TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE)) /* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */ #define FUNCTION_RVALUE_QUALIFIED(NODE) \ TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE)) /* Returns 1 iff VAR_DECL is a construction virtual table. DECL_VTABLE_OR_VTT_P will be true in this case and must be checked before using this macro. */ #define DECL_CONSTRUCTION_VTABLE_P(NODE) \ TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE)) /* 1 iff NODE is function-local, but for types. 
*/ #define LOCAL_CLASS_P(NODE) \ (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE) /* For a NAMESPACE_DECL: the list of using namespace directives The PURPOSE is the used namespace, the value is the namespace that is the common ancestor. */ #define DECL_NAMESPACE_USING(NODE) (LANG_DECL_NS_CHECK (NODE)->ns_using) /* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users of a namespace, to record the transitive closure of using namespace. */ #define DECL_NAMESPACE_USERS(NODE) (LANG_DECL_NS_CHECK (NODE)->ns_users) /* In a NAMESPACE_DECL, the list of namespaces which have associated themselves with this one. */ #define DECL_NAMESPACE_ASSOCIATIONS(NODE) \ DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE)) /* In a NAMESPACE_DECL, points to the original namespace if this is a namespace alias. */ #define DECL_NAMESPACE_ALIAS(NODE) \ DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE)) #define ORIGINAL_NAMESPACE(NODE) \ (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE)) /* Nonzero if NODE is the std namespace. */ #define DECL_NAMESPACE_STD_P(NODE) \ (TREE_CODE (NODE) == NAMESPACE_DECL \ && CP_DECL_CONTEXT (NODE) == global_namespace \ && DECL_NAME (NODE) == std_identifier) /* In a TREE_LIST concatenating using directives, indicate indirect directives */ #define TREE_INDIRECT_USING(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) /* In a TREE_LIST in an attribute list, indicates that the attribute must be applied at instantiation time. */ #define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) /* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag was inherited from a template parameter, not explicitly indicated. */ #define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE)) extern tree decl_shadowed_for_var_lookup (tree); extern void decl_shadowed_for_var_insert (tree, tree); /* Non zero if this is a using decl for a dependent scope. 
*/ #define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE)) /* The scope named in a using decl. */ #define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE)) /* The decls named by a using decl. */ #define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE)) /* Non zero if the using decl refers to a dependent type. */ #define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE)) /* In a VAR_DECL, true if we have a shadowed local variable in the shadowed var table for this VAR_DECL. */ #define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \ (VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p) /* In a VAR_DECL for a variable declared in a for statement, this is the shadowed (local) variable. */ #define DECL_SHADOWED_FOR_VAR(NODE) \ (DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL) #define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \ (decl_shadowed_for_var_insert (NODE, VAL)) /* In a FUNCTION_DECL, this is nonzero if this function was defined in the class definition. We have saved away the text of the function, but have not yet processed it. */ #define DECL_PENDING_INLINE_P(NODE) \ (LANG_DECL_FN_CHECK (NODE)->pending_inline_p) /* If DECL_PENDING_INLINE_P holds, this is the saved text of the function. */ #define DECL_PENDING_INLINE_INFO(NODE) \ (LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info) /* Nonzero for TYPE_DECL means that it was written 'using name = type'. */ #define TYPE_DECL_ALIAS_P(NODE) \ DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE)) /* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */ #define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \ DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE)) /* Nonzero for a type which is an alias for another type; i.e, a type which declaration was written 'using name-of-type = another-type'. 
*/ #define TYPE_ALIAS_P(NODE) \ (TYPE_P (NODE) \ && TYPE_NAME (NODE) \ && TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \ && TYPE_DECL_ALIAS_P (TYPE_NAME (NODE))) /* For a class type: if this structure has many fields, we'll sort them and put them into a TREE_VEC. */ #define CLASSTYPE_SORTED_FIELDS(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->sorted_fields) /* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or TEMPLATE_DECL, the entity is either a template specialization (if DECL_USE_TEMPLATE is nonzero) or the abstract instance of the template itself. In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a specialization or abstract instance. The TREE_VALUE is the template arguments used to specialize the template. Consider: template <typename T> struct S { friend void f(T) {} }; In this case, S<int>::f is, from the point of view of the compiler, an instantiation of a template -- but, from the point of view of the language, each instantiation of S results in a wholly unrelated global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f will be non-NULL, but DECL_USE_TEMPLATE will be zero. */ #define DECL_TEMPLATE_INFO(NODE) \ (DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \ ->u.min.template_info) /* For a VAR_DECL, indicates that the variable is actually a non-static data member of anonymous union that has been promoted to variable status. */ #define DECL_ANON_UNION_VAR_P(NODE) \ (DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))) /* Template information for a RECORD_TYPE or UNION_TYPE. */ #define CLASSTYPE_TEMPLATE_INFO(NODE) \ (LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info) /* Template information for an ENUMERAL_TYPE. Although an enumeration may not be a primary template, it may be declared within the scope of a primary template and the enumeration constants may depend on non-type template parameters. 
*/ #define ENUM_TEMPLATE_INFO(NODE) \ (TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE))) /* Template information for a template template parameter. */ #define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \ (LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \ ->template_info) /* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or BOUND_TEMPLATE_TEMPLATE_PARM type. Note that if NODE is a specialization of an alias template, this accessor returns the template info for the alias template, not the one (if any) for the template of the underlying type. */ #define TYPE_TEMPLATE_INFO(NODE) \ ((TYPE_ALIAS_P (NODE) && DECL_LANG_SPECIFIC (TYPE_NAME (NODE))) \ ? (DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \ ? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \ : NULL_TREE) \ : ((TREE_CODE (NODE) == ENUMERAL_TYPE) \ ? ENUM_TEMPLATE_INFO (NODE) \ : ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \ ? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) \ : (CLASS_TYPE_P (NODE) \ ? CLASSTYPE_TEMPLATE_INFO (NODE) \ : NULL_TREE)))) /* Set the template information for an ENUMERAL_, RECORD_, or UNION_TYPE to VAL. */ #define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \ (TREE_CODE (NODE) == ENUMERAL_TYPE \ ? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \ : ((CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \ ? (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)) \ : (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL)))) #define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE)) #define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE)) #define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE) /* For a given TREE_VEC containing a template argument list, this property contains the number of arguments that are not defaulted. */ #define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE)) /* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT property. 
*/ #define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \ NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE) #if CHECKING_P #define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) #else #define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \ NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \ ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \ : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE)) #endif /* The list of typedefs - used in the template - that need access checking at template instantiation time. FIXME this should be associated with the TEMPLATE_DECL, not the TEMPLATE_INFO. */ #define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \ ((struct tree_template_info*)TEMPLATE_INFO_CHECK \ (NODE))->typedefs_needing_access_checking /* We use TREE_VECs to hold template arguments. If there is only one level of template arguments, then the TREE_VEC contains the arguments directly. If there is more than one level of template arguments, then each entry in the TREE_VEC is itself a TREE_VEC, containing the template arguments for a single level. The first entry in the outer TREE_VEC is the outermost level of template parameters; the last is the innermost. It is incorrect to ever form a template argument vector containing only one level of arguments, but which is a TREE_VEC containing as its only entry the TREE_VEC for that level. For each TREE_VEC containing the template arguments for a single level, it's possible to get or set the number of non defaulted template arguments by using the accessor macros GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */ /* Nonzero if the template arguments is actually a vector of vectors, rather than just a vector. */ #define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \ (NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \ && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC) /* The depth of a template argument vector. 
When called directly by the parser, we use a TREE_LIST rather than a TREE_VEC to represent template arguments. In fact, we may even see NULL_TREE if there are no template arguments. In both of those cases, there is only one level of template arguments. */ #define TMPL_ARGS_DEPTH(NODE) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1) /* The LEVELth level of the template ARGS. The outermost level of args is level 1, not level 0. */ #define TMPL_ARGS_LEVEL(ARGS, LEVEL) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \ ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS)) /* Set the LEVELth level of the template ARGS to VAL. This macro does not work with single-level argument vectors. */ #define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \ (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL)) /* Accesses the IDXth parameter in the LEVELth level of the ARGS. */ #define TMPL_ARG(ARGS, LEVEL, IDX) \ (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX)) /* Given a single level of template arguments in NODE, return the number of arguments. */ #define NUM_TMPL_ARGS(NODE) \ (TREE_VEC_LENGTH (NODE)) /* Returns the innermost level of template arguments in ARGS. */ #define INNERMOST_TEMPLATE_ARGS(NODE) \ (get_innermost_template_args ((NODE), 1)) /* The number of levels of template parameters given by NODE. */ #define TMPL_PARMS_DEPTH(NODE) \ ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE))) /* The TEMPLATE_DECL instantiated or specialized by NODE. This TEMPLATE_DECL will be the immediate parent, not the most general template. For example, in: template <class T> struct S { template <class U> void f(U); } the FUNCTION_DECL for S<int>::f<double> will have, as its DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'. As a special case, for a member friend template of a template class, this value will not be a TEMPLATE_DECL, but rather an IDENTIFIER_NODE or OVERLOAD indicating the name of the template and any explicit template arguments provided. 
For example, in:

     template <class T> struct S {
       friend void f<int>(int, double);
     }

   the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
   DECL_TI_ARGS will be {int}.

   For a FIELD_DECL with a non-static data member initializer, this value
   is the FIELD_DECL it was instantiated from.  */
#define DECL_TI_TEMPLATE(NODE)      TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))

/* The template arguments used to obtain this decl from the most
   general form of DECL_TI_TEMPLATE.  For the example given for
   DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}.  These
   are always the full set of arguments required to instantiate this
   declaration from the most general template specialized here.  */
#define DECL_TI_ARGS(NODE)	    TI_ARGS (DECL_TEMPLATE_INFO (NODE))

/* The TEMPLATE_DECL associated with NODE, a class type.  Even if NODE
   will be generated from a partial specialization, the TEMPLATE_DECL
   referred to here will be the original template.  For example, given:

      template <typename T> struct S {};
      template <typename T> struct S<T*> {};

   the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>.  */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE)     TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))

/* For a template instantiation TYPE, returns the TYPE corresponding
   to the primary template.  Otherwise returns TYPE itself.  */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE)				\
  ((CLASSTYPE_USE_TEMPLATE ((TYPE))					\
    && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE)))			\
   ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE		\
				      (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
   : (TYPE))

/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs.  */
#define TYPE_TI_TEMPLATE(NODE)			\
  (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))

/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE.
*/ #define TYPE_TI_ARGS(NODE) \ (TI_ARGS (TYPE_TEMPLATE_INFO (NODE))) #define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE) /* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the sense of [temp.mem]. */ #define DECL_MEMBER_TEMPLATE_P(NODE) \ (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE))) /* Nonzero if the NODE corresponds to the template parameters for a member template, whose inline definition is being processed after the class definition is complete. */ #define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE) /* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */ #define DECL_PACK_P(NODE) \ (DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE))) /* Determines if NODE is an expansion of one or more parameter packs, e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_P(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ || TREE_CODE (NODE) == EXPR_PACK_EXPANSION) /* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define PACK_EXPANSION_PATTERN(NODE) \ (TREE_CODE (NODE) == TYPE_PACK_EXPANSION? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ #define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* The list of parameter packs used in the PACK_EXPANSION_* node. The TREE_VALUE of each TREE_LIST contains the parameter packs. */ #define PACK_EXPANSION_PARAMETER_PACKS(NODE) \ *(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \ ? &TREE_OPERAND (NODE, 1) \ : &TYPE_MINVAL (TYPE_PACK_EXPANSION_CHECK (NODE))) /* Any additional template args to be applied when substituting into the pattern, set by tsubst_pack_expansion for partial instantiations. */ #define PACK_EXPANSION_EXTRA_ARGS(NODE) \ *(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \ ? 
&TYPE_MAXVAL (NODE) \ : &TREE_OPERAND ((NODE), 2)) /* True iff this pack expansion is within a function context. */ #define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE) /* True iff this pack expansion is for sizeof.... */ #define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE) /* True iff the wildcard can match a template parameter pack. */ #define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE) /* Determine if this is an argument pack. */ #define ARGUMENT_PACK_P(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \ || TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK) /* The arguments stored in an argument pack. Arguments are stored in a TREE_VEC, which may have length zero. */ #define ARGUMENT_PACK_ARGS(NODE) \ (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \ : TREE_OPERAND (NODE, 0)) /* Set the arguments stored in an argument pack. VALUE must be a TREE_VEC. */ #define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \ if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \ TREE_TYPE (NODE) = VALUE; \ else \ TREE_OPERAND (NODE, 0) = VALUE /* Whether the argument pack is "incomplete", meaning that more arguments can still be deduced. Incomplete argument packs are only used when the user has provided an explicit template argument list for a variadic function template. Some of the explicit template arguments will be placed into the beginning of the argument pack, but additional arguments might still be deduced. */ #define ARGUMENT_PACK_INCOMPLETE_P(NODE) \ TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE)) /* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template arguments used to fill this pack. */ #define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \ TREE_TYPE (ARGUMENT_PACK_ARGS (NODE)) /* In an ARGUMENT_PACK_SELECT, the argument pack from which an argument will be selected. 
*/ #define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack) /* In an ARGUMENT_PACK_SELECT, the index of the argument we want to select. */ #define ARGUMENT_PACK_SELECT_INDEX(NODE) \ (((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index) /* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the ARGUMENT_PACK_SELECT represents. */ #define ARGUMENT_PACK_SELECT_ARG(NODE) \ TREE_VEC_ELT (ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (NODE)), \ ARGUMENT_PACK_SELECT_INDEX (NODE)) #define FOLD_EXPR_CHECK(NODE) \ TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \ BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR) #define BINARY_FOLD_EXPR_CHECK(NODE) \ TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR) /* True if NODE is UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR */ #define FOLD_EXPR_P(NODE) \ (TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \ || TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \ || TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \ || TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR) /* True when NODE is a fold over a compound assignment operator. */ #define FOLD_EXPR_MODIFY_P(NODE) \ TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE)) /* An INTEGER_CST containing the tree code of the folded operator. */ #define FOLD_EXPR_OP(NODE) \ TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0) /* The expression containing an unexpanded parameter pack. */ #define FOLD_EXPR_PACK(NODE) \ TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1) /* In a binary fold expression, the argument with no unexpanded parameter packs. */ #define FOLD_EXPR_INIT(NODE) \ TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2) /* In a FUNCTION_DECL, the saved language-specific per-function data. */ #define DECL_SAVED_FUNCTION_DATA(NODE) \ (LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \ ->u.saved_language_function) /* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. 
*/ #define REFERENCE_REF_P(NODE) \ (INDIRECT_REF_P (NODE) \ && TREE_TYPE (TREE_OPERAND (NODE, 0)) \ && (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \ == REFERENCE_TYPE)) /* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a reference to VLA type, because it's used for VLA capture. */ #define REFERENCE_VLA_OK(NODE) \ (TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE))) #define NEW_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_VEC(NODE) \ TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE)) /* Indicates that this is a non-dependent COMPOUND_EXPR which will resolve to a function call. */ #define COMPOUND_EXPR_OVERLOADED(NODE) \ TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE)) /* In a CALL_EXPR appearing in a template, true if Koenig lookup should be performed at instantiation time. */ #define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE)) /* True if CALL_EXPR expresses list-initialization of an object. */ #define CALL_EXPR_LIST_INIT_P(NODE) \ TREE_LANG_FLAG_3 (TREE_CHECK2 ((NODE),CALL_EXPR,AGGR_INIT_EXPR)) /* Indicates whether a string literal has been parenthesized. Such usages are disallowed in certain circumstances. */ #define PAREN_STRING_LITERAL_P(NODE) \ TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE)) /* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, or an INDIRECT_REF comes from parenthesizing a _DECL. Currently only set some of the time in C++14 mode. */ #define REF_PARENTHESIZED_P(NODE) \ TREE_LANG_FLAG_2 (TREE_CHECK3 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF)) /* Nonzero if this AGGR_INIT_EXPR provides for initialization via a constructor call, rather than an ordinary function call. */ #define AGGR_INIT_VIA_CTOR_P(NODE) \ TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE)) /* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize the object. 
*/
#define AGGR_INIT_ZERO_FIRST(NODE) \
  TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))

/* Nonzero means that the call is the jump from a thunk to the
   thunked-to function.  */
#define AGGR_INIT_FROM_THUNK_P(NODE) \
  (AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag)

/* AGGR_INIT_EXPR accessors.  These are equivalent to the CALL_EXPR
   accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
   CALL_EXPR_STATIC_CHAIN).  Operand 0 is the variable-length-exp
   header, so the fixed operands start at index 1 and the arguments
   proper start at index 3.  */

#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
  TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
  TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
/* Number of user-visible arguments: total operands minus the 3 fixed ones.  */
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)

/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
   We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
   the argument count is zero when checking is enabled.  Instead, do
   the pointer arithmetic to advance past the 3 fixed operands in a
   AGGR_INIT_EXPR.  That produces a valid pointer to just past the end of
   the operand array, even if it's not valid to dereference it.  */
#define AGGR_INIT_EXPR_ARGP(NODE) \
  (&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)

/* Abstract iterators for AGGR_INIT_EXPRs.  */

/* Structure containing iterator state.  */
struct aggr_init_expr_arg_iterator {
  tree t;	/* the aggr_init_expr */
  int n;	/* argument count */
  int i;	/* next argument index */
};

/* Initialize the abstract argument list iterator object ITER with the
   arguments from AGGR_INIT_EXPR node EXP.  */
inline void
init_aggr_init_expr_arg_iterator (tree exp,
				  aggr_init_expr_arg_iterator *iter)
{
  iter->t = exp;
  iter->n = aggr_init_expr_nargs (exp);
  iter->i = 0;
}

/* Return the next argument from abstract argument list iterator object
   ITER, and advance its state.  Return NULL_TREE if there are no more
   arguments.  */
inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
  tree result;
  if (iter->i >= iter->n)
    return NULL_TREE;
  result = AGGR_INIT_EXPR_ARG (iter->t, iter->i);
  iter->i++;
  return result;
}

/* Initialize the abstract argument list iterator object ITER, then advance
   past and return the first argument.  Useful in for expressions, e.g.
     for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
          arg = next_aggr_init_expr_arg (&iter))   */
inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
  init_aggr_init_expr_arg_iterator (exp, iter);
  return next_aggr_init_expr_arg (iter);
}

/* Test whether there are more arguments in abstract argument list
   iterator ITER, without changing its state.  */
inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
  return (iter->i < iter->n);
}

/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
   ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state.  */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call)			\
  for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg);	\
       (arg) = next_aggr_init_expr_arg (&(iter)))

/* VEC_INIT_EXPR accessors.  */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)

/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
   Only set when the current function is constexpr.  */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
  TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))

/* Indicates that a VEC_INIT_EXPR is expressing value-initialization.  */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
  TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))

/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
   exceptions.  NULL_TREE means 'true'.
*/ #define MUST_NOT_THROW_COND(NODE) \ TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1) /* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a TEMPLATE_DECL. This macro determines whether or not a given class type is really a template type, as opposed to an instantiation or specialization of one. */ #define CLASSTYPE_IS_TEMPLATE(NODE) \ (CLASSTYPE_TEMPLATE_INFO (NODE) \ && !CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) /* The name used by the user to name the typename type. Typically, this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the corresponding TYPE_DECL. However, this may also be a TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */ #define TYPENAME_TYPE_FULLNAME(NODE) \ (TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE was declared as an "enum". */ #define TYPENAME_IS_ENUM_P(NODE) \ (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE was declared as a "class", "struct", or "union". */ #define TYPENAME_IS_CLASS_P(NODE) \ (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE is in the process of being resolved. */ #define TYPENAME_IS_RESOLVING_P(NODE) \ (TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE))) /* [class.virtual] A class that declares or inherits a virtual function is called a polymorphic class. */ #define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE)) /* Nonzero if this class has a virtual function table pointer. */ #define TYPE_CONTAINS_VPTR_P(NODE) \ (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE)) /* This flag is true of a local VAR_DECL if it was declared in a for statement, but we are no longer in the scope of the for. */ #define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE)) /* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL if we already emitted a warning about using it. 
*/ #define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)) /* Nonzero if NODE is a FUNCTION_DECL (for a function with global scope) declared in a local scope. */ #define DECL_LOCAL_FUNCTION_P(NODE) \ DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE)) /* Nonzero if NODE is the target for genericization of 'break' stmts. */ #define LABEL_DECL_BREAK(NODE) \ DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE)) /* Nonzero if NODE is the target for genericization of 'continue' stmts. */ #define LABEL_DECL_CONTINUE(NODE) \ DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE)) /* True if NODE was declared with auto in its return type, but it has started compilation and so the return type might have been changed by return type deduction; its declared return type should be found in DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */ #define FNDECL_USED_AUTO(NODE) \ TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE)) /* Nonzero if NODE is a DECL which we know about but which has not been explicitly declared, such as a built-in function or a friend declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P will be set. */ #define DECL_ANTICIPATED(NODE) \ (DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \ ->u.base.anticipated_p) /* True for artificial decls added for OpenMP privatized non-static data members. */ #define DECL_OMP_PRIVATIZED_MEMBER(NODE) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p) /* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend within a class but has not been declared in the surrounding scope. The function is invisible except via argument dependent lookup. */ #define DECL_HIDDEN_FRIEND_P(NODE) \ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p) /* Nonzero if NODE is an artificial FUNCTION_DECL for #pragma omp declare reduction. 
*/ #define DECL_OMP_DECLARE_REDUCTION_P(NODE) \ (LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p) /* Nonzero if DECL has been declared threadprivate by #pragma omp threadprivate. */ #define CP_DECL_THREADPRIVATE_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p) /* Nonzero if DECL was declared with '= delete'. */ #define DECL_DELETED_FN(DECL) \ (LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p) /* Nonzero if DECL was declared with '= default' (maybe implicitly). */ #define DECL_DEFAULTED_FN(DECL) \ (LANG_DECL_FN_CHECK (DECL)->defaulted_p) /* Nonzero if DECL is explicitly defaulted in the class body. */ #define DECL_DEFAULTED_IN_CLASS_P(DECL) \ (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL)) /* Nonzero if DECL was defaulted outside the class body. */ #define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \ (DECL_DEFAULTED_FN (DECL) \ && !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL))) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* Returns nonzero if DECL has external linkage, as specified by the language standard. (This predicate may hold even when the corresponding entity is not actually given external linkage in the object file; see decl_linkage for details.) */ #define DECL_EXTERNAL_LINKAGE_P(DECL) \ (decl_linkage (DECL) == lk_external) /* Keep these codes in ascending code order. */ #define INTEGRAL_CODE_P(CODE) \ ((CODE) == ENUMERAL_TYPE \ || (CODE) == BOOLEAN_TYPE \ || (CODE) == INTEGER_TYPE) /* [basic.fundamental] Types bool, char, wchar_t, and the signed and unsigned integer types are collectively called integral types. Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration types as well, which is incorrect in C++. Keep these checks in ascending code order. 
*/ #define CP_INTEGRAL_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || TREE_CODE (TYPE) == INTEGER_TYPE) /* Returns true if TYPE is an integral or enumeration name. Keep these checks in ascending code order. */ #define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE)) /* Returns true if TYPE is an integral or unscoped enumeration type. */ #define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \ (UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE)) /* True if the class type TYPE is a literal type. */ #define CLASSTYPE_LITERAL_P(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal) /* [basic.fundamental] Integral and floating types are collectively called arithmetic types. As a GNU extension, we also accept complex types. Keep these checks in ascending code order. */ #define ARITHMETIC_TYPE_P(TYPE) \ (CP_INTEGRAL_TYPE_P (TYPE) \ || TREE_CODE (TYPE) == REAL_TYPE \ || TREE_CODE (TYPE) == COMPLEX_TYPE) /* True iff TYPE is cv decltype(nullptr). */ #define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE) /* [basic.types] Arithmetic types, enumeration types, pointer types, pointer-to-member types, and std::nullptr_t are collectively called scalar types. Keep these checks in ascending code order. */ #define SCALAR_TYPE_P(TYPE) \ (TYPE_PTRDATAMEM_P (TYPE) \ || TREE_CODE (TYPE) == ENUMERAL_TYPE \ || ARITHMETIC_TYPE_P (TYPE) \ || TYPE_PTR_P (TYPE) \ || TYPE_PTRMEMFUNC_P (TYPE) \ || NULLPTR_TYPE_P (TYPE)) /* Determines whether this type is a C++0x scoped enumeration type. Scoped enumerations types are introduced via "enum class" or "enum struct", e.g., enum class Color { Red, Green, Blue }; Scoped enumeration types are different from normal (unscoped) enumeration types in several ways: - The enumerators of a scoped enumeration type are only available within the scope of the enumeration type and not in the enclosing scope. For example, the Red color can be referred to with "Color::Red" but not "Red". 
- Scoped enumerators and enumerations do not implicitly convert to integers or 'bool'. - The underlying type of the enum is well-defined. */ #define SCOPED_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE)) /* Determine whether this is an unscoped enumeration type. */ #define UNSCOPED_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE)) /* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped enumeration type (1) or a normal (unscoped) enumeration type (0). */ #define SET_SCOPED_ENUM_P(TYPE, VAL) \ (ENUM_IS_SCOPED (TYPE) = (VAL)) #define SET_OPAQUE_ENUM_P(TYPE, VAL) \ (ENUM_IS_OPAQUE (TYPE) = (VAL)) #define OPAQUE_ENUM_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE)) /* Determines whether an ENUMERAL_TYPE has an explicit underlying type. */ #define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE)) /* Returns the underlying type of the given enumeration type. The underlying type is determined in different ways, depending on the properties of the enum: - In C++0x, the underlying type can be explicitly specified, e.g., enum E1 : char { ... } // underlying type is char - In a C++0x scoped enumeration, the underlying type is int unless otherwises specified: enum class E2 { ... } // underlying type is int - Otherwise, the underlying type is determined based on the values of the enumerators. In this case, the ENUM_UNDERLYING_TYPE will not be set until after the definition of the enumeration is completed by finish_enum. */ #define ENUM_UNDERLYING_TYPE(TYPE) \ TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE)) /* [dcl.init.aggr] An aggregate is an array or a class with no user-provided constructors, no brace-or-equal-initializers for non-static data members, no private or protected non-static data members, no base classes, and no virtual functions. As an extension, we also treat vectors as aggregates. Keep these checks in ascending code order. 
*/ #define CP_AGGREGATE_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == VECTOR_TYPE \ ||TREE_CODE (TYPE) == ARRAY_TYPE \ || (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE))) /* Nonzero for a class type means that the class type has a user-declared constructor. */ #define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE)) /* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a late-specified return type. */ #define TYPE_HAS_LATE_RETURN_TYPE(NODE) \ (TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE))) /* When appearing in an INDIRECT_REF, it means that the tree structure underneath is actually a call to a constructor. This is needed when the constructor must initialize local storage (which can be automatically destroyed), rather than allowing it to allocate space from the heap. When appearing in a SAVE_EXPR, it means that underneath is a call to a constructor. When appearing in a CONSTRUCTOR, the expression is a compound literal. When appearing in a FIELD_DECL, it means that this field has been duly initialized in its constructor. */ #define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE)) /* True if NODE is a brace-enclosed initializer. */ #define BRACE_ENCLOSED_INITIALIZER_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node) /* True if NODE is a compound-literal, i.e., a brace-enclosed initializer cast to a particular type. */ #define COMPOUND_LITERAL_P(NODE) \ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE)) #define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \ && vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\ && !TREE_HAS_CONSTRUCTOR (NODE)) /* True if NODE is a init-list used as a direct-initializer, i.e. B b{1,2}, not B b({1,2}) or B b = {1,2}. */ #define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE))) /* True if an uninitialized element in NODE should not be treated as implicitly value-initialized. Only used in constexpr evaluation. 
*/ #define CONSTRUCTOR_NO_IMPLICIT_ZERO(NODE) \ (TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE))) /* True if this CONSTRUCTOR should not be used as a variable initializer because it was loaded from a constexpr variable with mutable fields. */ #define CONSTRUCTOR_MUTABLE_POISON(NODE) \ (TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE))) #define DIRECT_LIST_INIT_P(NODE) \ (BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE)) /* True if NODE represents a conversion for direct-initialization in a template. Set by perform_implicit_conversion_flags. */ #define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \ (TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE))) /* Nonzero means that an object of this type can not be initialized using an initializer list. */ #define CLASSTYPE_NON_AGGREGATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate) #define TYPE_NON_AGGREGATE_CLASS(NODE) \ (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE)) /* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign) /* Nonzero if there is a non-trivial X::X(cv X&) for this class. */ #define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor) /* Nonzero if there is a non-trivial X::op=(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign) /* Nonzero if there is a non-trivial X::X(X&&) for this class. */ #define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor) /* Nonzero if there is no trivial default constructor for this class. */ #define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt) /* Nonzero if TYPE has a trivial destructor. 
From [class.dtor]:

     A destructor is trivial if it is an implicitly declared
     destructor and if:

       - all of the direct base classes of its class have trivial
	 destructors,

       - for all of the non-static data members of its class that are
	 of class type (or array thereof), each such class has a
	 trivial destructor.  */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
  (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))

/* Nonzero for _TYPE node means that this type does not have a
   trivial destructor.  Therefore, destroying an object of this type
   will involve a call to a destructor.  This can apply to objects
   of ARRAY_TYPE if the type of the elements needs a destructor.  */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
  (TYPE_LANG_FLAG_4 (NODE))

/* Nonzero for class type means that the default constructor is trivial.  */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
  (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))

/* Nonzero for class type means that copy initialization of this type can use
   a bitwise copy.  */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
  (TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))

/* Nonzero for class type means that assignment of this type can use
   a bitwise copy.  */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
  (TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))

/* Returns true if NODE is a pointer-to-data-member.  */
#define TYPE_PTRDATAMEM_P(NODE) \
  (TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer.  */
#define TYPE_PTR_P(NODE) \
  (TREE_CODE (NODE) == POINTER_TYPE)

/* Returns true if NODE is an object type:

     [basic.types]

     An object type is a (possibly cv-qualified) type that is not a
     function type, not a reference type, and not a void type.

   Keep these checks in ascending order, for speed.  */
#define TYPE_OBJ_P(NODE) \
  (TREE_CODE (NODE) != REFERENCE_TYPE \
   && !VOID_TYPE_P (NODE) \
   && TREE_CODE (NODE) != FUNCTION_TYPE \
   && TREE_CODE (NODE) != METHOD_TYPE)

/* Returns true if NODE is a pointer to an object.
Keep these checks in ascending tree code order. */ #define TYPE_PTROB_P(NODE) \ (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a reference to an object. Keep these checks in ascending tree code order. */ #define TYPE_REF_OBJ_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a pointer to an object, or a pointer to void. Keep these checks in ascending tree code order. */ #define TYPE_PTROBV_P(NODE) \ (TYPE_PTR_P (NODE) \ && !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \ || TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)) /* Returns true if NODE is a pointer to function type. */ #define TYPE_PTRFN_P(NODE) \ (TYPE_PTR_P (NODE) \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a reference to function type. */ #define TYPE_REFFN_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a pointer to member function type. */ #define TYPE_PTRMEMFUNC_P(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_PTRMEMFUNC_FLAG (NODE)) #define TYPE_PTRMEMFUNC_FLAG(NODE) \ (TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE))) /* Returns true if NODE is a pointer-to-member. */ #define TYPE_PTRMEM_P(NODE) \ (TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE)) /* Returns true if NODE is a pointer or a pointer-to-member. */ #define TYPE_PTR_OR_PTRMEM_P(NODE) \ (TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE)) /* Indicates when overload resolution may resolve to a pointer to member function. [expr.unary.op]/3 */ #define PTRMEM_OK_P(NODE) \ TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF)) /* Get the POINTER_TYPE to the METHOD_TYPE associated with this pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true, before using this macro. 
*/
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
  (cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\
			    cp_type_quals (NODE)))

/* As above, but can be used in places that want an lvalue at the expense
   of not necessarily having the correct cv-qualifiers.  */
#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \
  (TREE_TYPE (TYPE_FIELDS (NODE)))

/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
  TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))

/* These are used to manipulate the canonical RECORD_TYPE from the
   hashed POINTER_TYPE, and can only be used on the POINTER_TYPE.  */
#define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \
  (TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL)
#define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE)				\
  do {									\
    if (TYPE_LANG_SPECIFIC (NODE) == NULL)				\
      {									\
	TYPE_LANG_SPECIFIC (NODE)					\
	  = (struct lang_type *) ggc_internal_cleared_alloc		\
	    (sizeof (struct lang_type_ptrmem));				\
	TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0;	\
      }									\
    TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE);		\
  } while (0)

/* For a pointer-to-member type of the form `T X::*', this is `X'.
   For a type like `void (X::*)() const', this type is `X', not `const
   X'.  To get at the `const X' you have to look at the
   TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
   type `const X*'.  */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
  (TYPE_PTRDATAMEM_P (NODE) \
   ? TYPE_OFFSET_BASETYPE (NODE) \
   : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))

/* For a pointer-to-member type of the form `T X::*', this is `T'.  */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
  (TYPE_PTRDATAMEM_P (NODE) \
   ? TREE_TYPE (NODE) \
   : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))

/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
   `X'.  */
#define PTRMEM_CST_CLASS(NODE) \
  TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))

/* For a pointer-to-member constant `X::Y' this is the _DECL for
   `Y'.
*/ #define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member) /* The expression in question for a TYPEOF_TYPE. */ #define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE))) /* The type in question for an UNDERLYING_TYPE. */ #define UNDERLYING_TYPE_TYPE(NODE) \ (TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE))) /* The type in question for BASES. */ #define BASES_TYPE(NODE) \ (TYPE_VALUES_RAW (BASES_CHECK (NODE))) #define BASES_DIRECT(NODE) \ TREE_LANG_FLAG_0 (BASES_CHECK (NODE)) /* The expression in question for a DECLTYPE_TYPE. */ #define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE))) /* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an id-expression or a member-access expression. When false, it was parsed as a full expression. */ #define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \ (DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag /* These flags indicate that we want different semantics from normal decltype: lambda capture just drops references, init capture uses auto semantics, lambda proxies look through implicit dereference. */ #define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \ TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_INIT_CAPTURE(NODE) \ TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE)) #define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \ TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_EXTERN(NODE) \ DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. 
*/
#define DECL_THIS_STATIC(NODE) \
  DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))

/* Nonzero for FIELD_DECL node means that this field is a lambda capture
   field for an array of runtime bound.  */
#define DECL_VLA_CAPTURE_P(NODE) \
  DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))

/* Nonzero for PARM_DECL node means that this is an array function
   parameter, i.e., a[] rather than *a.  */
#define DECL_ARRAY_PARAMETER_P(NODE) \
  DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))

/* Nonzero for a FIELD_DECL whose NSDMI is currently being
   instantiated.  */
#define DECL_INSTANTIATING_NSDMI_P(NODE) \
  DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE))

/* Nonzero for FIELD_DECL node means that this field is a base class
   of the parent object, as opposed to a member field.  */
#define DECL_FIELD_IS_BASE(NODE) \
  DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))

/* Nonzero for FIELD_DECL node means that this field is a simple (no
   explicit initializer) lambda capture field, making it invisible to
   name lookup in unevaluated contexts.  */
#define DECL_NORMAL_CAPTURE_P(NODE) \
  DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))

/* Nonzero if TYPE is an anonymous union or struct type.  We have to use a
   flag for this because "A union for which objects or pointers are
   declared is not an anonymous union" [class.union].  */
#define ANON_AGGR_TYPE_P(NODE) \
  (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)

/* Nonzero if TYPE is an anonymous union type.  */
#define ANON_UNION_TYPE_P(NODE) \
  (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))

/* Define fields and accessors for nodes representing declared names.  */

#define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)

/* C++: all of these are overloaded!  These apply only to TYPE_DECLs.  */

/* The format of each node in the DECL_FRIENDLIST is as follows: The
   TREE_PURPOSE will be the name of a function, i.e., an
   IDENTIFIER_NODE.
The TREE_VALUE will be itself a TREE_LIST, whose TREE_VALUEs are
   friends with the given name.  */
#define DECL_FRIENDLIST(NODE)		(DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))

/* The DECL_ACCESS, if non-NULL, is a TREE_LIST.  The TREE_PURPOSE of
   each node is a type; the TREE_VALUE is the access granted for this
   DECL in that type.  The DECL_ACCESS is set by access declarations.
   For example, if a member that would normally be public in a
   derived class is made protected, then the derived class and the
   protected_access_node will appear in the DECL_ACCESS for the node.  */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)

/* Nonzero if the FUNCTION_DECL is a global constructor.  */
#define DECL_GLOBAL_CTOR_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->global_ctor_p)

/* Nonzero if the FUNCTION_DECL is a global destructor.  */
#define DECL_GLOBAL_DTOR_P(NODE) \
  (LANG_DECL_FN_CHECK (NODE)->global_dtor_p)

/* Accessor macros for C++ template decl nodes.  */

/* The DECL_TEMPLATE_PARMS are a list.  The TREE_PURPOSE of each node
   is an INT_CST whose TREE_INT_CST_LOW indicates the level of the
   template parameters, with 1 being the outermost set of template
   parameters.  The TREE_VALUE is a vector, whose elements are the
   template parameters at each level.  Each element in the vector is
   a TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is
   a non-type parameter), or a TYPE_DECL (if the parameter is a type
   parameter).  The TREE_PURPOSE is the default value, if any.  The
   TEMPLATE_PARM_INDEX for the parameter is available as the
   DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
   TYPE_DECL).

   FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
   tree is converted to C++ class hierarchy.
*/
#define DECL_TEMPLATE_PARMS(NODE)       \
   ((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
   INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))

#define DECL_NTPARMS(NODE) \
   TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))

/* For function, method, class-data templates.

   FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
   tree is converted to C++ class hierarchy.  */
#define DECL_TEMPLATE_RESULT(NODE)      \
   ((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result

/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
   lists all instantiations and specializations of the function so that
   tsubst_friend_function can reassign them to another template if we find
   that the namespace-scope template is really a partial instantiation of a
   friend template.

   For a class template the DECL_TEMPLATE_INSTANTIATIONS list holds all
   instantiations and specializations of the class type, including
   partial instantiations and partial specializations, so that if we
   explicitly specialize a partial instantiation we can walk the list
   in maybe_process_partial_specialization and reassign them or complain
   as appropriate.

   In both cases, the TREE_PURPOSE of each node contains the arguments
   used; the TREE_VALUE contains the generated variable.  The template
   arguments are always complete.  For example, given:

      template <class T> struct S1 {
	template <class U> struct S2 {};
	template <class U> struct S2<U*> {};
      };

   the record for the partial specialization will contain, as its
   argument list, { {T}, {U*} }, and will be on the
   DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
   <class U> struct S1<T>::S2'.

   This list is not used for other templates.  */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
  DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE))

/* For a class template, this list contains the partial
   specializations of this template.
(Full specializations are not recorded on this list.) The TREE_PURPOSE holds the arguments used in the partial specialization (e.g., for `template <class T> struct S<T*, int>' this will be `T*, int'.) The arguments will also include any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL for the partial specialization. The TREE_TYPE is the _TYPE node for the partial specialization. This list is not used for other templates. */ #define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \ DECL_SIZE (TEMPLATE_DECL_CHECK (NODE)) /* Nonzero for a DECL which is actually a template parameter. Keep these checks in ascending tree code order. */ #define DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) \ && (TREE_CODE (NODE) == CONST_DECL \ || TREE_CODE (NODE) == PARM_DECL \ || TREE_CODE (NODE) == TYPE_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL)) /* Mark NODE as a template parameter. */ #define SET_DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) = 1) /* Nonzero if NODE is a template template parameter. */ #define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE)) /* Nonzero for a DECL that represents a function template. */ #define DECL_FUNCTION_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL) /* Nonzero for a DECL that represents a class template or alias template. */ #define DECL_TYPE_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL) /* Nonzero for a DECL that represents a class template. */ #define DECL_CLASS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a TEMPLATE_DECL that represents an alias template. 
*/ #define DECL_ALIAS_TEMPLATE_P(NODE) \ (DECL_TYPE_TEMPLATE_P (NODE) \ && !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE))) /* Nonzero for a NODE which declares a type. */ #define DECL_DECLARES_TYPE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE)) /* Nonzero if NODE declares a function. */ #define DECL_DECLARES_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE)) /* Nonzero if NODE is the typedef implicitly generated for a type when the type is declared. In C++, `struct S {};' is roughly equivalent to `struct S {}; typedef struct S S;' in C. DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this example. In C++, there is a second implicit typedef for each class, in the scope of `S' itself, so that you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that second typedef. */ #define DECL_IMPLICIT_TYPEDEF_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE)) #define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \ (DECL_LANG_FLAG_2 (NODE) = 1) #define DECL_SELF_REFERENCE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE)) #define SET_DECL_SELF_REFERENCE_P(NODE) \ (DECL_LANG_FLAG_4 (NODE) = 1) /* A `primary' template is one that has its own template header and is not a partial specialization. A member function of a class template is a template, but not primary. A member template is primary. Friend templates are primary, too. */ /* Returns the primary template corresponding to these parameters. */ #define DECL_PRIMARY_TEMPLATE(NODE) \ (TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE))) /* Returns nonzero if NODE is a primary template. */ #define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE)) /* Nonzero iff NODE is a specialization of a template. 
The value indicates the type of specializations: 1=implicit instantiation 2=partial or explicit specialization, e.g.: template <> int min<int> (int, int), 3=explicit instantiation, e.g.: template int min<int> (int, int); Note that NODE will be marked as a specialization even if the template it is instantiating is not a primary template. For example, given: template <typename T> struct O { void f(); struct I {}; }; both O<int>::f and O<int>::I will be marked as instantiations. If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also be non-NULL. */ #define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template) /* Like DECL_USE_TEMPLATE, but for class types. */ #define CLASSTYPE_USE_TEMPLATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->use_template) /* True if NODE is a specialization of a primary template. */ #define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \ (CLASS_TYPE_P (NODE) \ && CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) #define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1) #define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) & 1) #define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2) #define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2) /* Returns true for an explicit or partial specialization of a class template. 
*/ #define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 2) #define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 2) #define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1) #define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1) #define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 1) #define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 1) #define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3) #define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3) #define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 3) #define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 3) /* Nonzero if DECL is a friend function which is an instantiation from the point of view of the compiler, but not from the point of view of the language. For example given: template <class T> struct S { friend void f(T) {}; }; the declaration of `void f(int)' generated when S<int> is instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */ #define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \ (DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL)) /* Nonzero if DECL is a function generated from a function 'temploid', i.e. template, member of class template, or dependent friend. */ #define DECL_TEMPLOID_INSTANTIATION(DECL) \ (DECL_TEMPLATE_INSTANTIATION (DECL) \ || DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL)) /* Nonzero if DECL is either defined implicitly by the compiler or generated from a temploid. */ #define DECL_GENERATED_P(DECL) \ (DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL)) /* Nonzero iff we are currently processing a declaration for an entity with its own template parameter list, and which is not a full specialization. 
*/ #define PROCESSING_REAL_TEMPLATE_DECL_P() \ (processing_template_decl > template_class_depth (current_scope ())) /* Nonzero if this VAR_DECL or FUNCTION_DECL has already been instantiated, i.e. its definition has been generated from the pattern given in the template. */ #define DECL_TEMPLATE_INSTANTIATED(NODE) \ DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE)) /* We know what we're doing with this decl now. */ #define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE) /* DECL_EXTERNAL must be set on a decl until the decl is actually emitted, so that assemble_external will work properly. So we have this flag to tell us whether the decl is really not external. This flag does not indicate whether or not the decl is defined in the current translation unit; it indicates whether or not we should emit the decl at the end of compilation if it is defined and needed. */ #define DECL_NOT_REALLY_EXTERN(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern) #define DECL_REALLY_EXTERN(NODE) \ (DECL_EXTERNAL (NODE) \ && (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE))) /* A thunk is a stub function. A thunk is an alternate entry point for an ordinary FUNCTION_DECL. The address of the ordinary FUNCTION_DECL is given by the DECL_INITIAL, which is always an ADDR_EXPR whose operand is a FUNCTION_DECL. The job of the thunk is to either adjust the this pointer before transferring control to the FUNCTION_DECL, or call FUNCTION_DECL and then adjust the result value. Note, the result pointer adjusting thunk must perform a call to the thunked function, (or be implemented via passing some invisible parameter to the thunked function, which is modified to perform the adjustment just before returning). A thunk may perform either, or both, of the following operations: o Adjust the this or result pointer by a constant offset. o Adjust the this or result pointer by looking up a vcall or vbase offset in the vtable. 
A this pointer adjusting thunk converts from a base to a derived class, and hence adds the offsets. A result pointer adjusting thunk converts from a derived class to a base, and hence subtracts the offsets. If both operations are performed, then the constant adjustment is performed first for this pointer adjustment and last for the result pointer adjustment. The constant adjustment is given by THUNK_FIXED_OFFSET. If the vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is used. For this pointer adjusting thunks, it is the vcall offset into the vtable. For result pointer adjusting thunks it is the binfo of the virtual base to convert to. Use that binfo's vbase offset. It is possible to have equivalent covariant thunks. These are distinct virtual covariant thunks whose vbase offsets happen to have the same value. THUNK_ALIAS is used to pick one as the canonical thunk, which will get all the this pointer adjusting thunks attached to it. */ /* An integer indicating how many bytes should be subtracted from the this or result pointer when this function is called. */ #define THUNK_FIXED_OFFSET(DECL) \ (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset) /* A tree indicating how to perform the virtual adjustment. For a this adjusting thunk it is the number of bytes to be added to the vtable to find the vcall offset. For a result adjusting thunk, it is the binfo of the relevant virtual base. If NULL, then there is no virtual adjust. (The vptr is always located at offset zero from the this or result pointer.) (If the covariant type is within the class hierarchy being laid out, the vbase index is not yet known at the point we need to create the thunks, hence the need to use binfos.) */ #define THUNK_VIRTUAL_OFFSET(DECL) \ (LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access) /* A thunk which is equivalent to another thunk. 
*/ #define THUNK_ALIAS(DECL) \ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info) /* For thunk NODE, this is the FUNCTION_DECL thunked to. It is possible for the target to be a thunk too. */ #define THUNK_TARGET(NODE) \ (LANG_DECL_FN_CHECK (NODE)->befriending_classes) /* True for a SCOPE_REF iff the "template" keyword was used to indicate that the qualified name denotes a template. */ #define QUALIFIED_NAME_IS_TEMPLATE(NODE) \ (TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE))) /* True for an OMP_ATOMIC that has dependent parameters. These are stored as an expr in operand 1, and integer_zero_node in operand 0. */ #define OMP_ATOMIC_DEPENDENT_P(NODE) \ (TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST) /* Used while gimplifying continue statements bound to OMP_FOR nodes. */ #define OMP_FOR_GIMPLIFYING_P(NODE) \ (TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE))) /* A language-specific token attached to the OpenMP data clauses to hold code (or code fragments) related to ctors, dtors, and op=. See semantics.c for details. */ #define CP_OMP_CLAUSE_INFO(NODE) \ TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \ OMP_CLAUSE_LINEAR)) /* Nonzero if this transaction expression's body contains statements. */ #define TRANSACTION_EXPR_IS_STMT(NODE) \ TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE)) /* These macros provide convenient access to the various _STMT nodes created when parsing template declarations. */ #define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0) #define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1) #define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0) #define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1) #define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0) /* Nonzero if this try block is a function try block. 
*/ #define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE)) #define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0) #define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1) #define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE)) /* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run and the VAR_DECL for which this cleanup exists. */ #define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0) #define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1) #define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2) /* IF_STMT accessors. These give access to the condition of the if statement, the then block of the if statement, and the else block of the if statement if it exists. */ #define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0) #define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1) #define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2) #define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3) /* WHILE_STMT accessors. These give access to the condition of the while statement and the body of the while statement, respectively. */ #define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0) #define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1) /* DO_STMT accessors. These give access to the condition of the do statement and the body of the do statement, respectively. */ #define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0) #define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1) /* FOR_STMT accessors. These give access to the init statement, condition, update expression, and body of the for statement, respectively. 
*/ #define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0) #define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1) #define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2) #define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3) #define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4) /* RANGE_FOR_STMT accessors. These give access to the declarator, expression, body, and scope of the statement, respectively. */ #define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0) #define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1) #define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2) #define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3) #define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE)) #define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0) #define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1) #define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2) #define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3) /* STMT_EXPR accessor. */ #define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0) /* EXPR_STMT accessor. This gives the expression associated with an expression statement. */ #define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0) /* True if this TARGET_EXPR was created by build_cplus_new, and so we can discard it if it isn't useful. */ #define TARGET_EXPR_IMPLICIT_P(NODE) \ TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE)) /* True if this TARGET_EXPR is the result of list-initialization of a temporary. */ #define TARGET_EXPR_LIST_INIT_P(NODE) \ TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE)) /* True if this TARGET_EXPR expresses direct-initialization of an object to be named later. 
*/ #define TARGET_EXPR_DIRECT_INIT_P(NODE) \ TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE)) /* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if the initializer has void type, it's doing something more complicated. */ #define SIMPLE_TARGET_EXPR_P(NODE) \ (TREE_CODE (NODE) == TARGET_EXPR \ && !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE)))) /* True if EXPR expresses direct-initialization of a TYPE. */ #define DIRECT_INIT_EXPR_P(TYPE,EXPR) \ (TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \ && same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR))) /* True if this CONVERT_EXPR is for a conversion to virtual base in an NSDMI, and should be re-evaluated when used in a constructor. */ #define CONVERT_EXPR_VBASE_PATH(NODE) \ TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE)) /* True if SIZEOF_EXPR argument is type. */ #define SIZEOF_EXPR_TYPE_P(NODE) \ TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE)) /* An enumeration of the kind of tags that C++ accepts. */ enum tag_types { none_type = 0, /* Not a tag type. */ record_type, /* "struct" types. */ class_type, /* "class" types. */ union_type, /* "union" types. */ enum_type, /* "enum" types. */ typename_type, /* "typename" types. */ scope_type /* namespace or tagged type name followed by :: */ }; /* The various kinds of lvalues we distinguish. */ enum cp_lvalue_kind_flags { clk_none = 0, /* Things that are not an lvalue. */ clk_ordinary = 1, /* An ordinary lvalue. */ clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */ clk_class = 4, /* A prvalue of class-type. */ clk_bitfield = 8, /* An lvalue for a bit-field. */ clk_packed = 16 /* An lvalue for a packed field. */ }; /* This type is used for parameters and variables which hold combinations of the flags in enum cp_lvalue_kind_flags. */ typedef int cp_lvalue_kind; /* Various kinds of template specialization, instantiation, etc. */ enum tmpl_spec_kind { tsk_none, /* Not a template at all. 
*/ tsk_invalid_member_spec, /* An explicit member template specialization, but the enclosing classes have not all been explicitly specialized. */ tsk_invalid_expl_inst, /* An explicit instantiation containing template parameter lists. */ tsk_excessive_parms, /* A template declaration with too many template parameter lists. */ tsk_insufficient_parms, /* A template declaration with too few parameter lists. */ tsk_template, /* A template declaration. */ tsk_expl_spec, /* An explicit specialization. */ tsk_expl_inst /* An explicit instantiation. */ }; /* The various kinds of access. BINFO_ACCESS depends on these being two bit quantities. The numerical values are important; they are used to initialize RTTI data structures, so changing them changes the ABI. */ enum access_kind { ak_none = 0, /* Inaccessible. */ ak_public = 1, /* Accessible, as a `public' thing. */ ak_protected = 2, /* Accessible, as a `protected' thing. */ ak_private = 3 /* Accessible, as a `private' thing. */ }; /* The various kinds of special functions. If you add to this list, you should update special_function_p as well. */ enum special_function_kind { sfk_none = 0, /* Not a special function. This enumeral must have value zero; see special_function_p. */ sfk_constructor, /* A constructor. */ sfk_copy_constructor, /* A copy constructor. */ sfk_move_constructor, /* A move constructor. */ sfk_copy_assignment, /* A copy assignment operator. */ sfk_move_assignment, /* A move assignment operator. */ sfk_destructor, /* A destructor. */ sfk_complete_destructor, /* A destructor for complete objects. */ sfk_base_destructor, /* A destructor for base subobjects. */ sfk_deleting_destructor, /* A destructor for complete objects that deletes the object after it has been destroyed. */ sfk_conversion, /* A conversion operator. */ sfk_inheriting_constructor /* An inheriting constructor */ }; /* The various kinds of linkage. 
From [basic.link], A name is said to have linkage when it might denote the same object, reference, function, type, template, namespace or value as a name introduced in another scope: -- When a name has external linkage, the entity it denotes can be referred to from scopes of other translation units or from other scopes of the same translation unit. -- When a name has internal linkage, the entity it denotes can be referred to by names from other scopes in the same translation unit. -- When a name has no linkage, the entity it denotes cannot be referred to by names from other scopes. */ enum linkage_kind { lk_none, /* No linkage. */ lk_internal, /* Internal linkage. */ lk_external /* External linkage. */ }; enum duration_kind { dk_static, dk_thread, dk_auto, dk_dynamic }; /* Bitmask flags to control type substitution. */ enum tsubst_flags { tf_none = 0, /* nothing special */ tf_error = 1 << 0, /* give error messages */ tf_warning = 1 << 1, /* give warnings too */ tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */ tf_keep_type_decl = 1 << 3, /* retain typedef type decls (make_typename_type use) */ tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal instantiate_type use) */ tf_user = 1 << 5, /* found template must be a user template (lookup_template_class use) */ tf_conv = 1 << 6, /* We are determining what kind of conversion might be permissible, not actually performing the conversion. */ tf_decltype = 1 << 7, /* We are the operand of decltype. Used to implement the special rules for calls in decltype (5.2.2/11). */ tf_partial = 1 << 8, /* Doing initial explicit argument substitution in fn_type_unification. */ /* Convenient substitution flags combinations. */ tf_warning_or_error = tf_warning | tf_error }; /* This type is used for parameters and variables which hold combinations of the flags in enum tsubst_flags. */ typedef int tsubst_flags_t; /* The kind of checking we can do looking in a class hierarchy. 
*/
/* Flags controlling how a base-class lookup walks the hierarchy.  */
enum base_access_flags {
  ba_any = 0,              /* Do not check access, allow an ambiguous base,
                              prefer a non-virtual base */
  ba_unique = 1 << 0,      /* Must be a unique base.  */
  ba_check_bit = 1 << 1,   /* Check access.  */
  ba_check = ba_unique | ba_check_bit,
  ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope.  */
};

/* This type is used for parameters and variables which hold
   combinations of the flags in enum base_access_flags.  */
typedef int base_access;

/* The various kinds of access check during parsing.  */
enum deferring_kind {
  dk_no_deferred = 0, /* Check access immediately */
  dk_deferred = 1,    /* Deferred check */
  dk_no_check = 2     /* No access check */
};

/* The kind of base we can find, looking in a class hierarchy.
   Values <0 indicate we failed.  */
enum base_kind {
  bk_inaccessible = -3, /* The base is inaccessible */
  bk_ambig = -2,        /* The base is ambiguous */
  bk_not_base = -1,     /* It is not a base */
  bk_same_type = 0,     /* It is the same type */
  bk_proper_base = 1,   /* It is a proper base */
  bk_via_virtual = 2    /* It is a proper base, but via a virtual path.
                           This might not be the canonical binfo.  */
};

/* Node for "pointer to (virtual) function".  This may be distinct
   from ptr_type_node so gdb can distinguish them.  */
#define vfunc_ptr_type_node vtable_entry_type

/* For building calls to `delete'.  */
extern GTY(()) tree integer_two_node;

/* The number of function bodies which we are currently processing.
   (Zero if we are at namespace scope, one inside the body of a
   function, two inside the body of a function in a local class, etc.)  */
extern int function_depth;

/* Nonzero if we are inside eq_specializations, which affects
   comparison of PARM_DECLs in cp_tree_equal.  */
extern int comparing_specializations;

/* In parser.c.  */

/* Nonzero if we are parsing an unevaluated operand: an operand to
   sizeof, typeof, or alignof.  This is a count since operands to
   sizeof can be nested.
*/ extern int cp_unevaluated_operand; /* RAII class used to inhibit the evaluation of operands during parsing and template instantiation. Evaluation warnings are also inhibited. */ struct cp_unevaluated { cp_unevaluated (); ~cp_unevaluated (); }; /* in pt.c */ /* These values are used for the `STRICT' parameter to type_unification and fn_type_unification. Their meanings are described with the documentation for fn_type_unification. */ enum unification_kind_t { DEDUCE_CALL, DEDUCE_CONV, DEDUCE_EXACT }; // An RAII class used to create a new pointer map for local // specializations. When the stack goes out of scope, the // previous pointer map is restored. struct local_specialization_stack { local_specialization_stack (); ~local_specialization_stack (); hash_map<tree, tree> *saved; }; /* in class.c */ extern int current_class_depth; /* An array of all local classes present in this translation unit, in declaration order. */ extern GTY(()) vec<tree, va_gc> *local_classes; /* Here's where we control how name mangling takes place. */ /* Cannot use '$' up front, because this confuses gdb (names beginning with '$' are gdb-local identifiers). Note that all forms in which the '$' is significant are long enough for direct indexing (meaning that if we know there is a '$' at a particular location, we can index into the string at any other location that provides distinguishing characters). */ /* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler doesn't allow '.' in symbol names. */ #ifndef NO_DOT_IN_LABEL #define JOINER '.' #define AUTO_TEMP_NAME "_.tmp_" #define VFIELD_BASE ".vf" #define VFIELD_NAME "_vptr." 
#define VFIELD_NAME_FORMAT "_vptr.%s" #else /* NO_DOT_IN_LABEL */ #ifndef NO_DOLLAR_IN_LABEL #define JOINER '$' #define AUTO_TEMP_NAME "_$tmp_" #define VFIELD_BASE "$vf" #define VFIELD_NAME "_vptr$" #define VFIELD_NAME_FORMAT "_vptr$%s" #else /* NO_DOLLAR_IN_LABEL */ #define AUTO_TEMP_NAME "__tmp_" #define TEMP_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \ sizeof (AUTO_TEMP_NAME) - 1)) #define VTABLE_NAME "__vt_" #define VTABLE_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \ sizeof (VTABLE_NAME) - 1)) #define VFIELD_BASE "__vfb" #define VFIELD_NAME "__vptr_" #define VFIELD_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \ sizeof (VFIELD_NAME) - 1)) #define VFIELD_NAME_FORMAT "__vptr_%s" #endif /* NO_DOLLAR_IN_LABEL */ #endif /* NO_DOT_IN_LABEL */ #define THIS_NAME "this" #define IN_CHARGE_NAME "__in_chrg" #define VTBL_PTR_TYPE "__vtbl_ptr_type" #define VTABLE_DELTA_NAME "__delta" #define VTABLE_PFN_NAME "__pfn" #define LAMBDANAME_PREFIX "__lambda" #define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d" #define UDLIT_OP_ANSI_PREFIX "operator\"\"" #define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s" #define UDLIT_OP_MANGLED_PREFIX "li" #define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s" #define UDLIT_OPER_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), \ UDLIT_OP_ANSI_PREFIX, \ sizeof (UDLIT_OP_ANSI_PREFIX) - 1)) #define UDLIT_OP_SUFFIX(ID_NODE) \ (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1) #if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) #define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \ && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \ && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER) #define TEMP_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1)) #define VFIELD_NAME_P(ID_NODE) \ (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1)) #endif /* 
!defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */ /* Nonzero if we're done parsing and into end-of-file activities. Two if we're done with front-end processing. */ extern int at_eof; /* True if note_mangling_alias should enqueue mangling aliases for later generation, rather than emitting them right away. */ extern bool defer_mangling_aliases; /* A list of namespace-scope objects which have constructors or destructors which reside in the global scope. The decl is stored in the TREE_VALUE slot and the initializer is stored in the TREE_PURPOSE slot. */ extern GTY(()) tree static_aggregates; /* Likewise, for thread local storage. */ extern GTY(()) tree tls_aggregates; enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG }; /* These are used as bits in flags passed to various functions to control their behavior. Despite the LOOKUP_ prefix, many of these do not control name lookup. ??? Functions using these flags should probably be modified to accept explicit boolean flags for the behaviors relevant to them. */ /* Check for access violations. */ #define LOOKUP_PROTECT (1 << 0) #define LOOKUP_NORMAL (LOOKUP_PROTECT) /* Even if the function found by lookup is a virtual function, it should be called directly. */ #define LOOKUP_NONVIRTUAL (1 << 1) /* Non-converting (i.e., "explicit") constructors are not tried. This flag indicates that we are not performing direct-initialization. */ #define LOOKUP_ONLYCONVERTING (1 << 2) #define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING) /* If a temporary is created, it should be created so that it lives as long as the current variable bindings; otherwise it only lives until the end of the complete-expression. It also forces direct-initialization in cases where other parts of the compiler have already generated a temporary, such as reference initialization and the catch parameter. 
*/ #define DIRECT_BIND (1 << 3) /* We're performing a user-defined conversion, so more user-defined conversions are not permitted (only built-in conversions). */ #define LOOKUP_NO_CONVERSION (1 << 4) /* The user has explicitly called a destructor. (Therefore, we do not need to check that the object is non-NULL before calling the destructor.) */ #define LOOKUP_DESTRUCTOR (1 << 5) /* Do not permit references to bind to temporaries. */ #define LOOKUP_NO_TEMP_BIND (1 << 6) /* Do not accept objects, and possibly namespaces. */ #define LOOKUP_PREFER_TYPES (1 << 7) /* Do not accept objects, and possibly types. */ #define LOOKUP_PREFER_NAMESPACES (1 << 8) /* Accept types or namespaces. */ #define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES) /* Return friend declarations and un-declared builtin functions. (Normally, these entities are registered in the symbol table, but not found by lookup.) */ #define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1) /* Prefer that the lvalue be treated as an rvalue. */ #define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1) /* We're inside an init-list, so narrowing conversions are ill-formed. */ #define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1) /* We're looking up a constructor for list-initialization. */ #define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1) /* This is the first parameter of a copy constructor. */ #define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1) /* We only want to consider list constructors. */ #define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1) /* Return after determining which function to call and checking access. Used by synthesized_method_walk to determine which functions will be called to initialize subobjects, in order to determine exception specification and possible implicit delete. This is kind of a hack, but exiting early avoids problems with trying to perform argument conversions when the class isn't complete yet. 
*/ #define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1) /* Used by calls from defaulted functions to limit the overload set to avoid cycles trying to declare them (core issue 1092). */ #define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1) /* Used in calls to store_init_value to suppress its usual call to digest_init. */ #define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1) /* An instantiation with explicit template arguments. */ #define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1) /* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */ #define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1) /* Used by case_conversion to disregard non-integral conversions. */ #define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1) /* Used for delegating constructors in order to diagnose self-delegation. */ #define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1) #define LOOKUP_NAMESPACES_ONLY(F) \ (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES)) #define LOOKUP_TYPES_ONLY(F) \ (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES)) #define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH) /* These flags are used by the conversion code. CONV_IMPLICIT : Perform implicit conversions (standard and user-defined). CONV_STATIC : Perform the explicit conversions for static_cast. CONV_CONST : Perform the explicit conversions for const_cast. CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast. CONV_PRIVATE : Perform upcasts to private bases. CONV_FORCE_TEMP : Require a new temporary when converting to the same aggregate type. 
*/ #define CONV_IMPLICIT 1 #define CONV_STATIC 2 #define CONV_CONST 4 #define CONV_REINTERPRET 8 #define CONV_PRIVATE 16 /* #define CONV_NONCONVERTING 32 */ #define CONV_FORCE_TEMP 64 #define CONV_FOLD 128 #define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ | CONV_REINTERPRET) #define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \ | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP) #define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD) /* Used by build_expr_type_conversion to indicate which types are acceptable as arguments to the expression under consideration. */ #define WANT_INT 1 /* integer types, including bool */ #define WANT_FLOAT 2 /* floating point types */ #define WANT_ENUM 4 /* enumerated types */ #define WANT_POINTER 8 /* pointer types */ #define WANT_NULL 16 /* null pointer constant */ #define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */ #define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX) /* Used with comptypes, and related functions, to guide type comparison. */ #define COMPARE_STRICT 0 /* Just check if the types are the same. */ #define COMPARE_BASE 1 /* Check to see if the second type is derived from the first. */ #define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in reverse. */ #define COMPARE_REDECLARATION 4 /* The comparison is being done when another declaration of an existing entity is seen. */ #define COMPARE_STRUCTURAL 8 /* The comparison is intended to be structural. The actual comparison will be identical to COMPARE_STRICT. */ /* Used with push_overloaded_decl. */ #define PUSH_GLOBAL 0 /* Push the DECL into namespace scope, regardless of the current scope. */ #define PUSH_LOCAL 1 /* Push the DECL into the current scope. */ #define PUSH_USING 2 /* We are pushing this DECL as the result of a using declaration. */ /* Used with start function. */ #define SF_DEFAULT 0 /* No flags. */ #define SF_PRE_PARSED 1 /* The function declaration has already been parsed. 
*/ #define SF_INCLASS_INLINE 2 /* The function is an inline, defined in the class body. */ /* Used with start_decl's initialized parameter. */ #define SD_UNINITIALIZED 0 #define SD_INITIALIZED 1 #define SD_DEFAULTED 2 #define SD_DELETED 3 /* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2 is derived from TYPE1, or if TYPE2 is a pointer (reference) to a class derived from the type pointed to (referred to) by TYPE1. */ #define same_or_base_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_BASE) /* These macros are used to access a TEMPLATE_PARM_INDEX. */ #define TEMPLATE_PARM_INDEX_CAST(NODE) \ ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE)) #define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index) #define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level) #define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE)) #define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level) #define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl) #define TEMPLATE_PARM_PARAMETER_PACK(NODE) \ (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE))) /* These macros are for accessing the fields of TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */ #define TEMPLATE_TYPE_PARM_INDEX(NODE) \ (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \ TEMPLATE_TEMPLATE_PARM, \ BOUND_TEMPLATE_TEMPLATE_PARM))) #define TEMPLATE_TYPE_IDX(NODE) \ (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_LEVEL(NODE) \ (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \ (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_DECL(NODE) \ (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE))) #define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \ (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE))) /* Contexts in which auto deduction occurs. 
These flags are used to control diagnostics in do_auto_deduction. */ enum auto_deduction_context { adc_unspecified, /* Not given */ adc_variable_type, /* Variable initializer deduction */ adc_return_type, /* Return type deduction */ adc_requirement /* Argument deduction constraint */ }; /* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */ #define AUTO_IS_DECLTYPE(NODE) \ (TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE))) /* These constants can be used as bit flags in the process of tree formatting. TFF_PLAIN_IDENTIFIER: unqualified part of a name. TFF_SCOPE: include the class and namespace scope of the name. TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name. TFF_DECL_SPECIFIERS: print decl-specifiers. TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with a class-key (resp. `enum'). TFF_RETURN_TYPE: include function return type. TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values. TFF_EXCEPTION_SPECIFICATION: show function exception specification. TFF_TEMPLATE_HEADER: show the template<...> header in a template-declaration. TFF_TEMPLATE_NAME: show only template-name. TFF_EXPR_IN_PARENS: parenthesize expressions. TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments. TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the top-level entity. TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments identical to their defaults. TFF_NO_TEMPLATE_BINDINGS: do not print information about the template arguments for a function template specialization. TFF_POINTER: we are printing a pointer type. 
*/ #define TFF_PLAIN_IDENTIFIER (0) #define TFF_SCOPE (1) #define TFF_CHASE_TYPEDEF (1 << 1) #define TFF_DECL_SPECIFIERS (1 << 2) #define TFF_CLASS_KEY_OR_ENUM (1 << 3) #define TFF_RETURN_TYPE (1 << 4) #define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5) #define TFF_EXCEPTION_SPECIFICATION (1 << 6) #define TFF_TEMPLATE_HEADER (1 << 7) #define TFF_TEMPLATE_NAME (1 << 8) #define TFF_EXPR_IN_PARENS (1 << 9) #define TFF_NO_FUNCTION_ARGUMENTS (1 << 10) #define TFF_UNQUALIFIED_NAME (1 << 11) #define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12) #define TFF_NO_TEMPLATE_BINDINGS (1 << 13) #define TFF_POINTER (1 << 14) /* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM node. */ #define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \ ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \ ? TYPE_TI_TEMPLATE (NODE) \ : TYPE_NAME (NODE)) /* in lex.c */ extern void init_reswords (void); typedef struct GTY(()) operator_name_info_t { /* The IDENTIFIER_NODE for the operator. */ tree identifier; /* The name of the operator. */ const char *name; /* The mangled name of the operator. */ const char *mangled_name; /* The arity of the operator. */ int arity; } operator_name_info_t; /* A mapping from tree codes to operator name information. */ extern GTY(()) operator_name_info_t operator_name_info [(int) MAX_TREE_CODES]; /* Similar, but for assignment operators. */ extern GTY(()) operator_name_info_t assignment_operator_name_info [(int) MAX_TREE_CODES]; /* A type-qualifier, or bitmask therefore, using the TYPE_QUAL constants. */ typedef int cp_cv_quals; /* Non-static member functions have an optional virt-specifier-seq. There is a VIRT_SPEC value for each virt-specifier. They can be combined by bitwise-or to form the complete set of virt-specifiers for a member function. */ enum virt_specifier { VIRT_SPEC_UNSPECIFIED = 0x0, VIRT_SPEC_FINAL = 0x1, VIRT_SPEC_OVERRIDE = 0x2 }; /* A type-qualifier, or bitmask therefore, using the VIRT_SPEC constants. 
*/ typedef int cp_virt_specifiers; /* Wherever there is a function-cv-qual, there could also be a ref-qualifier: [dcl.fct] The return type, the parameter-type-list, the ref-qualifier, and the cv-qualifier-seq, but not the default arguments or the exception specification, are part of the function type. REF_QUAL_NONE Ordinary member function with no ref-qualifier REF_QUAL_LVALUE Member function with the &-ref-qualifier REF_QUAL_RVALUE Member function with the &&-ref-qualifier */ enum cp_ref_qualifier { REF_QUAL_NONE = 0, REF_QUAL_LVALUE = 1, REF_QUAL_RVALUE = 2 }; /* A storage class. */ enum cp_storage_class { /* sc_none must be zero so that zeroing a cp_decl_specifier_seq sets the storage_class field to sc_none. */ sc_none = 0, sc_auto, sc_register, sc_static, sc_extern, sc_mutable }; /* An individual decl-specifier. This is used to index the array of locations for the declspecs in struct cp_decl_specifier_seq below. */ enum cp_decl_spec { ds_first, ds_signed = ds_first, ds_unsigned, ds_short, ds_long, ds_const, ds_volatile, ds_restrict, ds_inline, ds_virtual, ds_explicit, ds_friend, ds_typedef, ds_alias, ds_constexpr, ds_complex, ds_thread, ds_type_spec, ds_redefined_builtin_type_spec, ds_attribute, ds_std_attribute, ds_storage_class, ds_long_long, ds_concept, ds_last /* This enumerator must always be the last one. */ }; /* A decl-specifier-seq. */ struct cp_decl_specifier_seq { /* An array of locations for the declaration specifiers, indexed by enum cp_decl_spec_word. */ source_location locations[ds_last]; /* The primary type, if any, given by the decl-specifier-seq. Modifiers, like "short", "const", and "unsigned" are not reflected here. This field will be a TYPE, unless a typedef-name was used, in which case it will be a TYPE_DECL. */ tree type; /* The attributes, if any, provided with the specifier sequence. */ tree attributes; /* The c++11 attributes that follow the type specifier. 
*/ tree std_attributes; /* If non-NULL, a built-in type that the user attempted to redefine to some other type. */ tree redefined_builtin_type; /* The storage class specified -- or sc_none if no storage class was explicitly specified. */ cp_storage_class storage_class; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* True iff TYPE_SPEC defines a class or enum. */ BOOL_BITFIELD type_definition_p : 1; /* True iff multiple types were (erroneously) specified for this decl-specifier-seq. */ BOOL_BITFIELD multiple_types_p : 1; /* True iff multiple storage classes were (erroneously) specified for this decl-specifier-seq or a combination of a storage class with a typedef specifier. */ BOOL_BITFIELD conflicting_specifiers_p : 1; /* True iff at least one decl-specifier was found. */ BOOL_BITFIELD any_specifiers_p : 1; /* True iff at least one type-specifier was found. */ BOOL_BITFIELD any_type_specifiers_p : 1; /* True iff "int" was explicitly provided. */ BOOL_BITFIELD explicit_int_p : 1; /* True iff "__intN" was explicitly provided. */ BOOL_BITFIELD explicit_intN_p : 1; /* True iff "char" was explicitly provided. */ BOOL_BITFIELD explicit_char_p : 1; /* True iff ds_thread is set for __thread, not thread_local. */ BOOL_BITFIELD gnu_thread_keyword_p : 1; /* True iff the type is a decltype. */ BOOL_BITFIELD decltype_p : 1; }; /* The various kinds of declarators. */ enum cp_declarator_kind { cdk_id, cdk_function, cdk_array, cdk_pointer, cdk_reference, cdk_ptrmem, cdk_error }; /* A declarator. */ typedef struct cp_declarator cp_declarator; typedef struct cp_parameter_declarator cp_parameter_declarator; /* A parameter, before it has been semantically analyzed. */ struct cp_parameter_declarator { /* The next parameter, or NULL_TREE if none. */ cp_parameter_declarator *next; /* The decl-specifiers-seq for the parameter. */ cp_decl_specifier_seq decl_specifiers; /* The declarator for the parameter. 
*/ cp_declarator *declarator; /* The default-argument expression, or NULL_TREE, if none. */ tree default_argument; /* True iff this is a template parameter pack. */ bool template_parameter_pack_p; }; /* A declarator. */ struct cp_declarator { /* The kind of declarator. */ ENUM_BITFIELD (cp_declarator_kind) kind : 4; /* Whether we parsed an ellipsis (`...') just before the declarator, to indicate this is a parameter pack. */ BOOL_BITFIELD parameter_pack_p : 1; location_t id_loc; /* Currently only set for cdk_id and cdk_function. */ /* GNU Attributes that apply to this declarator. If the declarator is a pointer or a reference, these attribute apply to the type pointed to. */ tree attributes; /* Standard C++11 attributes that apply to this declarator. If the declarator is a pointer or a reference, these attributes apply to the pointer, rather than to the type pointed to. */ tree std_attributes; /* For all but cdk_id and cdk_error, the contained declarator. For cdk_id and cdk_error, guaranteed to be NULL. */ cp_declarator *declarator; union { /* For identifiers. */ struct { /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or *_TYPE) for this identifier. */ tree qualifying_scope; /* The unqualified name of the entity -- an IDENTIFIER_NODE, BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */ tree unqualified_name; /* If this is the name of a function, what kind of special function (if any). */ special_function_kind sfk; } id; /* For functions. */ struct { /* The parameters to the function as a TREE_LIST of decl/default. */ tree parameters; /* The cv-qualifiers for the function. */ cp_cv_quals qualifiers; /* The virt-specifiers for the function. */ cp_virt_specifiers virt_specifiers; /* The ref-qualifier for the function. */ cp_ref_qualifier ref_qualifier; /* The transaction-safety qualifier for the function. */ tree tx_qualifier; /* The exception-specification for the function. */ tree exception_specification; /* The late-specified return type, if any. 
*/ tree late_return_type; /* The trailing requires-clause, if any. */ tree requires_clause; } function; /* For arrays. */ struct { /* The bounds to the array. */ tree bounds; } array; /* For cdk_pointer and cdk_ptrmem. */ struct { /* The cv-qualifiers for the pointer. */ cp_cv_quals qualifiers; /* For cdk_ptrmem, the class type containing the member. */ tree class_type; } pointer; /* For cdk_reference */ struct { /* The cv-qualifiers for the reference. These qualifiers are only used to diagnose ill-formed code. */ cp_cv_quals qualifiers; /* Whether this is an rvalue reference */ bool rvalue_ref; } reference; } u; }; /* A level of template instantiation. */ struct GTY((chain_next ("%h.next"))) tinst_level { /* The immediately deeper level in the chain. */ struct tinst_level *next; /* The original node. Can be either a DECL (for a function or static data member) or a TYPE (for a class), depending on what we were asked to instantiate. */ tree decl; /* The location where the template is instantiated. */ location_t locus; /* errorcount+sorrycount when we pushed this level. */ int errors; /* True if the location is in a system header. */ bool in_system_header_p; }; bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec); /* Return the type of the `this' parameter of FNTYPE. */ inline tree type_of_this_parm (const_tree fntype) { function_args_iterator iter; gcc_assert (TREE_CODE (fntype) == METHOD_TYPE); function_args_iter_init (&iter, fntype); return function_args_iter_cond (&iter); } /* Return the class of the `this' parameter of FNTYPE. */ inline tree class_of_this_parm (const_tree fntype) { return TREE_TYPE (type_of_this_parm (fntype)); } /* True iff T is a variable template declaration. */ inline bool variable_template_p (tree t) { if (TREE_CODE (t) != TEMPLATE_DECL) return false; if (!PRIMARY_TEMPLATE_P (t)) return false; if (tree r = DECL_TEMPLATE_RESULT (t)) return VAR_P (r); return false; } /* True iff T is a variable concept definition. 
That is, T is a variable template declared with the concept specifier. */ inline bool variable_concept_p (tree t) { if (TREE_CODE (t) != TEMPLATE_DECL) return false; if (tree r = DECL_TEMPLATE_RESULT (t)) return VAR_P (r) && DECL_DECLARED_CONCEPT_P (r); return false; } /* True iff T is a concept definition. That is, T is a variable or function template declared with the concept specifier. */ inline bool concept_template_p (tree t) { if (TREE_CODE (t) != TEMPLATE_DECL) return false; if (tree r = DECL_TEMPLATE_RESULT (t)) return VAR_OR_FUNCTION_DECL_P (r) && DECL_DECLARED_CONCEPT_P (r); return false; } /* A parameter list indicating for a function with no parameters, e.g "int f(void)". */ extern cp_parameter_declarator *no_parameters; /* True if we saw "#pragma GCC java_exceptions". */ extern bool pragma_java_exceptions; /* in call.c */ extern bool check_dtor_name (tree, tree); int magic_varargs_p (tree); extern tree build_conditional_expr (location_t, tree, tree, tree, tsubst_flags_t); extern tree build_addr_func (tree, tsubst_flags_t); extern void set_flags_from_callee (tree); extern tree build_call_a (tree, int, tree*); extern tree build_call_n (tree, int, ...); extern bool null_ptr_cst_p (tree); extern bool null_member_pointer_value_p (tree); extern bool sufficient_parms_p (const_tree); extern tree type_decays_to (tree); extern tree build_user_type_conversion (tree, tree, int, tsubst_flags_t); extern tree build_new_function_call (tree, vec<tree, va_gc> **, bool, tsubst_flags_t); extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *, tree *, tree, tree *, tsubst_flags_t); extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **, tree, int, tree *, tsubst_flags_t); extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **, tree, int, tsubst_flags_t); extern tree build_new_op (location_t, enum tree_code, int, tree, tree, tree, tree *, tsubst_flags_t); extern tree build_op_call (tree, vec<tree, va_gc> **, tsubst_flags_t); 
extern bool non_placement_deallocation_fn_p (tree); extern tree build_op_delete_call (enum tree_code, tree, tree, bool, tree, tree, tsubst_flags_t); extern bool can_convert (tree, tree, tsubst_flags_t); extern bool can_convert_standard (tree, tree, tsubst_flags_t); extern bool can_convert_arg (tree, tree, tree, int, tsubst_flags_t); extern bool can_convert_arg_bad (tree, tree, tree, int, tsubst_flags_t); extern bool enforce_access (tree, tree, tree, tsubst_flags_t); extern void push_defarg_context (tree); extern void pop_defarg_context (void); extern tree convert_default_arg (tree, tree, tree, int, tsubst_flags_t); extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t); extern tree build_x_va_arg (source_location, tree, tree); extern tree cxx_type_promotes_to (tree); extern tree type_passed_as (tree); extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t); extern bool is_properly_derived_from (tree, tree); extern tree initialize_reference (tree, tree, int, tsubst_flags_t); extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**); extern tree make_temporary_var_for_ref_to_temp (tree, tree); extern bool type_has_extended_temps (tree); extern tree strip_top_quals (tree); extern bool reference_related_p (tree, tree); extern int remaining_arguments (tree); extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t); extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int); extern tree build_integral_nontype_arg_conv (tree, tree, tsubst_flags_t); extern tree perform_direct_initialization_if_possible (tree, tree, bool, tsubst_flags_t); extern tree in_charge_arg_for_name (tree); extern tree build_cxx_call (tree, int, tree *, tsubst_flags_t); extern bool is_std_init_list (tree); extern bool is_list_ctor (tree); extern void validate_conversion_obstack (void); extern void mark_versions_used (tree); extern tree get_function_version_dispatcher (tree); /* in class.c */ extern tree build_vfield_ref (tree, tree); extern tree 
build_if_in_charge (tree true_stmt, tree false_stmt = void_node); extern tree build_base_path (enum tree_code, tree, tree, int, tsubst_flags_t); extern tree convert_to_base (tree, tree, bool, bool, tsubst_flags_t); extern tree convert_to_base_statically (tree, tree); extern tree build_vtbl_ref (tree, tree); extern tree build_vfn_ref (tree, tree); extern tree get_vtable_decl (tree, int); extern void resort_type_method_vec (void *, void *, gt_pointer_operator, void *); extern bool add_method (tree, tree, tree); extern tree currently_open_class (tree); extern tree currently_open_derived_class (tree); extern tree outermost_open_class (void); extern tree current_nonlambda_class_type (void); extern tree finish_struct (tree, tree); extern void finish_struct_1 (tree); extern int resolves_to_fixed_type_p (tree, int *); extern void init_class_processing (void); extern int is_empty_class (tree); extern bool is_really_empty_class (tree); extern void pushclass (tree); extern void popclass (void); extern void push_nested_class (tree); extern void pop_nested_class (void); extern int current_lang_depth (void); extern void push_lang_context (tree); extern void pop_lang_context (void); extern tree instantiate_type (tree, tree, tsubst_flags_t); extern void print_class_statistics (void); extern void build_self_reference (void); extern int same_signature_p (const_tree, const_tree); extern void maybe_add_class_template_decl_list (tree, tree, int); extern void unreverse_member_declarations (tree); extern void invalidate_class_lookup_cache (void); extern void maybe_note_name_used_in_class (tree, tree); extern void note_name_declared_in_class (tree, tree); extern tree get_vtbl_decl_for_binfo (tree); extern bool vptr_via_virtual_p (tree); extern void debug_class (tree); extern void debug_thunks (tree); extern void set_linkage_according_to_type (tree, tree); extern void determine_key_method (tree); extern void check_for_override (tree, tree); extern void push_class_stack (void); extern void 
pop_class_stack (void); extern bool type_has_user_nondefault_constructor (tree); extern tree in_class_defaulted_default_constructor (tree); extern bool user_provided_p (tree); extern bool type_has_user_provided_constructor (tree); extern bool type_has_user_provided_or_explicit_constructor (tree); extern bool type_has_non_user_provided_default_constructor (tree); extern bool vbase_has_user_provided_move_assign (tree); extern tree default_init_uninitialized_part (tree); extern bool trivial_default_constructor_is_constexpr (tree); extern bool type_has_constexpr_default_constructor (tree); extern bool type_has_virtual_destructor (tree); extern bool type_has_move_constructor (tree); extern bool type_has_move_assign (tree); extern bool type_has_user_declared_move_constructor (tree); extern bool type_has_user_declared_move_assign(tree); extern bool type_build_ctor_call (tree); extern bool type_build_dtor_call (tree); extern void explain_non_literal_class (tree); extern void inherit_targ_abi_tags (tree); extern void defaulted_late_check (tree); extern bool defaultable_fn_check (tree); extern void check_abi_tags (tree); extern void fixup_type_variants (tree); extern void fixup_attribute_variants (tree); extern tree* decl_cloned_function_p (const_tree, bool); extern void clone_function_decl (tree, int); extern void adjust_clone_args (tree); extern void deduce_noexcept_on_destructor (tree); extern void insert_late_enum_def_into_classtype_sorted_fields (tree, tree); extern bool uniquely_derived_from_p (tree, tree); extern bool publicly_uniquely_derived_p (tree, tree); extern tree common_enclosing_class (tree, tree); /* in cvt.c */ extern tree convert_to_reference (tree, tree, int, int, tree, tsubst_flags_t); extern tree convert_from_reference (tree); extern tree force_rvalue (tree, tsubst_flags_t); extern tree ocp_convert (tree, tree, int, int, tsubst_flags_t); extern tree cp_convert (tree, tree, tsubst_flags_t); extern tree cp_convert_and_check (tree, tree, tsubst_flags_t); 
extern tree cp_fold_convert (tree, tree); extern tree convert_to_void (tree, impl_conv_void, tsubst_flags_t); extern tree convert_force (tree, tree, int, tsubst_flags_t); extern tree build_expr_type_conversion (int, tree, bool); extern tree type_promotes_to (tree); extern tree perform_qualification_conversions (tree, tree); extern bool tx_safe_fn_type_p (tree); extern tree tx_unsafe_fn_variant (tree); extern bool can_convert_tx_safety (tree, tree); /* in name-lookup.c */ extern tree pushdecl (tree); extern tree pushdecl_maybe_friend (tree, bool); extern void maybe_push_cleanup_level (tree); extern tree pushtag (tree, tree, tag_scope); extern tree make_anon_name (void); extern tree pushdecl_top_level_maybe_friend (tree, bool); extern tree pushdecl_top_level_and_finish (tree, tree); extern tree check_for_out_of_scope_variable (tree); extern void dump (cp_binding_level &ref); extern void dump (cp_binding_level *ptr); extern void print_other_binding_stack (cp_binding_level *); extern tree maybe_push_decl (tree); extern tree current_decl_namespace (void); /* decl.c */ extern tree poplevel (int, int, int); extern void cxx_init_decl_processing (void); enum cp_tree_node_structure_enum cp_tree_node_structure (union lang_tree_node *); extern void finish_scope (void); extern void push_switch (tree); extern void pop_switch (void); extern tree make_lambda_name (void); extern int decls_match (tree, tree); extern tree duplicate_decls (tree, tree, bool); extern tree declare_local_label (tree); extern tree define_label (location_t, tree); extern void check_goto (tree); extern bool check_omp_return (void); extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t); extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t); extern tree build_library_fn_ptr (const char *, tree, int); extern tree build_cp_library_fn_ptr (const char *, tree, int); extern tree push_library_fn (tree, tree, tree, int); extern tree push_void_library_fn (tree, tree, int); 
extern tree push_throw_library_fn (tree, tree); extern void warn_misplaced_attr_for_class_type (source_location location, tree class_type); extern tree check_tag_decl (cp_decl_specifier_seq *, bool); extern tree shadow_tag (cp_decl_specifier_seq *); extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool); extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *); extern void start_decl_1 (tree, bool); extern bool check_array_initializer (tree, tree, tree); extern void cp_finish_decl (tree, tree, bool, tree, int); extern int cp_complete_array_type (tree *, tree, bool); extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t); extern tree build_ptrmemfunc_type (tree); extern tree build_ptrmem_type (tree, tree); /* the grokdeclarator prototype is in decl.h */ extern tree build_this_parm (tree, cp_cv_quals); extern tree grokparms (tree, tree *); extern int copy_fn_p (const_tree); extern bool move_fn_p (const_tree); extern bool move_signature_fn_p (const_tree); extern tree get_scope_of_declarator (const cp_declarator *); extern void grok_special_member_properties (tree); extern int grok_ctor_properties (const_tree, const_tree); extern bool grok_op_properties (tree, bool); extern tree xref_tag (enum tag_types, tree, tag_scope, bool); extern tree xref_tag_from_type (tree, tree, tag_scope); extern bool xref_basetypes (tree, tree); extern tree start_enum (tree, tree, tree, tree, bool, bool *); extern void finish_enum_value_list (tree); extern void finish_enum (tree); extern void build_enumerator (tree, tree, tree, tree, location_t); extern tree lookup_enumerator (tree, tree); extern bool start_preparsed_function (tree, tree, int); extern bool start_function (cp_decl_specifier_seq *, const cp_declarator *, tree); extern tree begin_function_body (void); extern void finish_function_body (tree); extern tree outer_curly_brace_block (tree); extern tree finish_function (int); extern tree 
grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree); extern void maybe_register_incomplete_var (tree); extern void maybe_commonize_var (tree); extern void complete_vars (tree); extern tree static_fn_type (tree); extern void revert_static_member_fn (tree); extern void fixup_anonymous_aggr (tree); extern tree compute_array_index_type (tree, tree, tsubst_flags_t); extern tree check_default_argument (tree, tree, tsubst_flags_t); typedef int (*walk_namespaces_fn) (tree, void *); extern int walk_namespaces (walk_namespaces_fn, void *); extern int wrapup_globals_for_namespace (tree, void *); extern tree create_implicit_typedef (tree, tree); extern int local_variable_p (const_tree); extern tree register_dtor_fn (tree); extern tmpl_spec_kind current_tmpl_spec_kind (int); extern tree cp_fname_init (const char *, tree *); extern tree cxx_builtin_function (tree decl); extern tree cxx_builtin_function_ext_scope (tree decl); extern tree check_elaborated_type_specifier (enum tag_types, tree, bool); extern void warn_extern_redeclared_static (tree, tree); extern tree cxx_comdat_group (tree); extern bool cp_missing_noreturn_ok_p (tree); extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *); extern tree check_var_type (tree, tree); extern tree reshape_init (tree, tree, tsubst_flags_t); extern tree next_initializable_field (tree); extern tree fndecl_declared_return_type (tree); extern bool undeduced_auto_decl (tree); extern void require_deduced_type (tree); extern tree finish_case_label (location_t, tree, tree); extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t); /* in decl2.c */ extern void note_mangling_alias (tree, tree); extern void generate_mangling_aliases (void); extern bool check_java_method (tree); extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier); extern tree build_pointer_ptrmemfn_type (tree); extern tree change_return_type (tree, tree); extern void maybe_retrofit_in_chrg (tree); extern void 
maybe_make_one_only (tree); extern bool vague_linkage_p (tree); extern void grokclassfn (tree, tree, enum overload_flags); extern tree grok_array_decl (location_t, tree, tree, bool); extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t); extern tree check_classfn (tree, tree, tree); extern void check_member_template (tree); extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *, tree, bool, tree, tree); extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *, tree, tree); extern tree cp_reconstruct_complex_type (tree, tree); extern bool attributes_naming_typedef_ok (tree); extern void cplus_decl_attributes (tree *, tree, int); extern void finish_anon_union (tree); extern void cxx_post_compilation_parsing_cleanups (void); extern tree coerce_new_type (tree); extern tree coerce_delete_type (tree); extern void comdat_linkage (tree); extern void determine_visibility (tree); extern void constrain_class_visibility (tree); extern void reset_type_linkage (tree); extern void tentative_decl_linkage (tree); extern void import_export_decl (tree); extern tree build_cleanup (tree); extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **, tsubst_flags_t); extern bool decl_constant_var_p (tree); extern bool decl_maybe_constant_var_p (tree); extern void no_linkage_error (tree); extern void check_default_args (tree); extern bool mark_used (tree); extern bool mark_used (tree, tsubst_flags_t); extern void finish_static_data_member_decl (tree, tree, bool, tree, int); extern tree cp_build_parm_decl (tree, tree); extern tree get_guard (tree); extern tree get_guard_cond (tree, bool); extern tree set_guard (tree); extern tree get_tls_wrapper_fn (tree); extern void mark_needed (tree); extern bool decl_needed_p (tree); extern void note_vague_linkage_fn (tree); extern void note_variable_template_instantiation (tree); extern tree build_artificial_parm (tree, tree); extern bool possibly_inlined_p (tree); extern int parm_index (tree); 
extern tree vtv_start_verification_constructor_init_function (void); extern tree vtv_finish_verification_constructor_init_function (tree); extern bool cp_omp_mappable_type (tree); /* in error.c */ extern const char *type_as_string (tree, int); extern const char *type_as_string_translate (tree, int); extern const char *decl_as_string (tree, int); extern const char *decl_as_string_translate (tree, int); extern const char *decl_as_dwarf_string (tree, int); extern const char *expr_as_string (tree, int); extern const char *lang_decl_name (tree, int, bool); extern const char *lang_decl_dwarf_name (tree, int, bool); extern const char *language_to_string (enum languages); extern const char *class_key_or_enum_as_string (tree); extern void maybe_warn_variadic_templates (void); extern void maybe_warn_cpp0x (cpp0x_warn_str str); extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); extern location_t location_of (tree); extern void qualified_name_lookup_error (tree, tree, tree, location_t); /* in except.c */ extern void init_exception_processing (void); extern tree expand_start_catch_block (tree); extern void expand_end_catch_block (void); extern tree build_exc_ptr (void); extern tree build_throw (tree); extern int nothrow_libfn_p (const_tree); extern void check_handlers (tree); extern tree finish_noexcept_expr (tree, tsubst_flags_t); extern bool expr_noexcept_p (tree, tsubst_flags_t); extern void perform_deferred_noexcept_checks (void); extern bool nothrow_spec_p (const_tree); extern bool type_noexcept_p (const_tree); extern bool type_throw_all_p (const_tree); extern tree build_noexcept_spec (tree, int); extern void choose_personality_routine (enum languages); extern tree build_must_not_throw_expr (tree,tree); extern tree eh_type_info (tree); extern tree begin_eh_spec_block (void); extern void finish_eh_spec_block (tree, tree); extern tree build_eh_type_type (tree); extern tree cp_protect_cleanup_actions (void); extern tree 
create_try_catch_expr (tree, tree); /* in expr.c */ extern tree cplus_expand_constant (tree); extern tree mark_rvalue_use (tree, location_t = UNKNOWN_LOCATION, bool = true); extern tree mark_lvalue_use (tree); extern tree mark_type_use (tree); extern void mark_exp_read (tree); /* friend.c */ extern int is_friend (tree, tree); extern void make_friend_class (tree, tree, bool); extern void add_friend (tree, tree, bool); extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool); /* in init.c */ extern tree expand_member_init (tree); extern void emit_mem_initializers (tree); extern tree build_aggr_init (tree, tree, int, tsubst_flags_t); extern int is_class_type (tree, int); extern tree get_type_value (tree); extern tree build_zero_init (tree, tree, bool); extern tree build_value_init (tree, tsubst_flags_t); extern tree build_value_init_noctor (tree, tsubst_flags_t); extern tree get_nsdmi (tree, bool); extern tree build_offset_ref (tree, tree, bool, tsubst_flags_t); extern tree throw_bad_array_new_length (void); extern tree build_new (vec<tree, va_gc> **, tree, tree, vec<tree, va_gc> **, int, tsubst_flags_t); extern tree get_temp_regvar (tree, tree); extern tree build_vec_init (tree, tree, tree, bool, int, tsubst_flags_t); extern tree build_delete (tree, tree, special_function_kind, int, int, tsubst_flags_t); extern void push_base_cleanups (void); extern tree build_vec_delete (tree, tree, special_function_kind, int, tsubst_flags_t); extern tree create_temporary_var (tree); extern void initialize_vtbl_ptrs (tree); extern tree build_java_class_ref (tree); extern tree scalar_constant_value (tree); extern tree decl_really_constant_value (tree); extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool); extern tree build_vtbl_address (tree); /* in lex.c */ extern void cxx_dup_lang_specific_decl (tree); extern void yyungetc (int, int); extern tree unqualified_name_lookup_error (tree); extern tree unqualified_fn_lookup_error (tree); extern tree 
build_lang_decl (enum tree_code, tree, tree); extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree); extern void retrofit_lang_decl (tree); extern tree copy_decl (tree); extern tree copy_type (tree); extern tree cxx_make_type (enum tree_code); extern tree make_class_type (enum tree_code); extern bool cxx_init (void); extern void cxx_finish (void); extern bool in_main_input_context (void); /* in method.c */ extern void init_method (void); extern tree make_thunk (tree, bool, tree, tree); extern void finish_thunk (tree); extern void use_thunk (tree, bool); extern bool trivial_fn_p (tree); extern tree forward_parm (tree); extern bool is_trivially_xible (enum tree_code, tree, tree); extern tree get_defaulted_eh_spec (tree); extern tree unevaluated_noexcept_spec (void); extern void after_nsdmi_defaulted_late_checks (tree); extern bool maybe_explain_implicit_delete (tree); extern void explain_implicit_non_constexpr (tree); extern void deduce_inheriting_ctor (tree); extern void synthesize_method (tree); extern tree lazily_declare_fn (special_function_kind, tree); extern tree skip_artificial_parms_for (const_tree, tree); extern int num_artificial_parms_for (const_tree); extern tree make_alias_for (tree, tree); extern tree get_copy_ctor (tree, tsubst_flags_t); extern tree get_copy_assign (tree); extern tree get_default_ctor (tree); extern tree get_dtor (tree, tsubst_flags_t); extern tree get_inherited_ctor (tree); extern tree locate_ctor (tree); extern tree implicitly_declare_fn (special_function_kind, tree, bool, tree, tree); /* In optimize.c */ extern bool maybe_clone_body (tree); /* In parser.c */ extern tree cp_convert_range_for (tree, tree, tree, bool); extern bool parsing_nsdmi (void); extern void inject_this_parameter (tree, cp_cv_quals); /* in pt.c */ extern bool check_template_shadow (tree); extern tree get_innermost_template_args (tree, int); extern void maybe_begin_member_template_processing (tree); extern void 
maybe_end_member_template_processing (void); extern tree finish_member_template_decl (tree); extern void begin_template_parm_list (void); extern bool begin_specialization (void); extern void reset_specialization (void); extern void end_specialization (void); extern void begin_explicit_instantiation (void); extern void end_explicit_instantiation (void); extern tree check_explicit_specialization (tree, tree, int, int); extern int num_template_headers_for_class (tree); extern void check_template_variable (tree); extern tree make_auto (void); extern tree make_decltype_auto (void); extern tree do_auto_deduction (tree, tree, tree); extern tree do_auto_deduction (tree, tree, tree, tsubst_flags_t, auto_deduction_context); extern tree type_uses_auto (tree); extern tree type_uses_auto_or_concept (tree); extern void append_type_to_template_for_access_check (tree, tree, tree, location_t); extern tree convert_generic_types_to_packs (tree, int, int); extern tree splice_late_return_type (tree, tree); extern bool is_auto (const_tree); extern bool is_auto_or_concept (const_tree); extern tree process_template_parm (tree, location_t, tree, bool, bool); extern tree end_template_parm_list (tree); extern void end_template_parm_list (void); extern void end_template_decl (void); extern tree maybe_update_decl_type (tree, tree); extern bool check_default_tmpl_args (tree, tree, bool, bool, int); extern tree push_template_decl (tree); extern tree push_template_decl_real (tree, bool); extern tree add_inherited_template_parms (tree, tree); extern bool redeclare_class_template (tree, tree, tree); extern tree lookup_template_class (tree, tree, tree, tree, int, tsubst_flags_t); extern tree lookup_template_function (tree, tree); extern tree lookup_template_variable (tree, tree); extern int uses_template_parms (tree); extern bool uses_template_parms_level (tree, int); extern bool in_template_function (void); extern tree instantiate_class_template (tree); extern tree instantiate_template (tree, tree, 
tsubst_flags_t); extern tree fn_type_unification (tree, tree, tree, const tree *, unsigned int, tree, unification_kind_t, int, bool, bool); extern void mark_decl_instantiated (tree, int); extern int more_specialized_fn (tree, tree, int); extern void do_decl_instantiation (tree, tree); extern void do_type_instantiation (tree, tree, tsubst_flags_t); extern bool always_instantiate_p (tree); extern void maybe_instantiate_noexcept (tree); extern tree instantiate_decl (tree, int, bool); extern int comp_template_parms (const_tree, const_tree); extern bool uses_parameter_packs (tree); extern bool template_parameter_pack_p (const_tree); extern bool function_parameter_pack_p (const_tree); extern bool function_parameter_expanded_from_pack_p (tree, tree); extern tree make_pack_expansion (tree); extern bool check_for_bare_parameter_packs (tree); extern tree build_template_info (tree, tree); extern tree get_template_info (const_tree); extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree); extern int template_class_depth (tree); extern int is_specialization_of (tree, tree); extern bool is_specialization_of_friend (tree, tree); extern tree get_pattern_parm (tree, tree); extern int comp_template_args (tree, tree, tree * = NULL, tree * = NULL); extern int template_args_equal (tree, tree); extern tree maybe_process_partial_specialization (tree); extern tree most_specialized_instantiation (tree); extern void print_candidates (tree); extern void instantiate_pending_templates (int); extern tree tsubst_default_argument (tree, tree, tree, tsubst_flags_t); extern tree tsubst (tree, tree, tsubst_flags_t, tree); extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree, bool, bool); extern tree tsubst_expr (tree, tree, tsubst_flags_t, tree, bool); extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree); extern tree most_general_template (tree); extern tree get_mostly_instantiated_function_type (tree); extern bool 
problematic_instantiation_changed (void); extern void record_last_problematic_instantiation (void); extern struct tinst_level *current_instantiation(void); extern bool instantiating_current_function_p (void); extern tree maybe_get_template_decl_from_type_decl (tree); extern int processing_template_parmlist; extern bool dependent_type_p (tree); extern bool dependent_scope_p (tree); extern bool any_dependent_template_arguments_p (const_tree); extern bool dependent_template_p (tree); extern bool dependent_template_id_p (tree, tree); extern bool type_dependent_expression_p (tree); extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *); extern bool any_type_dependent_elements_p (const_tree); extern bool type_dependent_expression_p_push (tree); extern bool value_dependent_expression_p (tree); extern bool instantiation_dependent_expression_p (tree); extern bool instantiation_dependent_uneval_expression_p (tree); extern bool any_value_dependent_elements_p (const_tree); extern bool dependent_omp_for_p (tree, tree, tree, tree); extern tree resolve_typename_type (tree, bool); extern tree template_for_substitution (tree); extern tree build_non_dependent_expr (tree); extern void make_args_non_dependent (vec<tree, va_gc> *); extern bool reregister_specialization (tree, tree, tree); extern tree instantiate_non_dependent_expr (tree); extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t); extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t); extern tree instantiate_non_dependent_or_null (tree); extern bool variable_template_specialization_p (tree); extern bool alias_type_or_template_p (tree); extern bool alias_template_specialization_p (const_tree); extern bool dependent_alias_template_spec_p (const_tree); extern bool explicit_class_specialization_p (tree); extern bool push_tinst_level (tree); extern bool push_tinst_level_loc (tree, location_t); extern void pop_tinst_level (void); extern struct tinst_level 
*outermost_tinst_level(void); extern void init_template_processing (void); extern void print_template_statistics (void); bool template_template_parameter_p (const_tree); bool template_type_parameter_p (const_tree); extern bool primary_template_instantiation_p (const_tree); extern tree get_primary_template_innermost_parameters (const_tree); extern tree get_template_parms_at_level (tree, int); extern tree get_template_innermost_arguments (const_tree); extern tree get_template_argument_pack_elems (const_tree); extern tree get_function_template_decl (const_tree); extern tree resolve_nondeduced_context (tree, tsubst_flags_t); extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val); extern tree coerce_template_parms (tree, tree, tree); extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t); extern void register_local_specialization (tree, tree); extern tree retrieve_local_specialization (tree); extern tree extract_fnparm_pack (tree, tree *); extern tree template_parm_to_arg (tree); /* in repo.c */ extern void init_repo (void); extern int repo_emit_p (tree); extern bool repo_export_class_p (const_tree); extern void finish_repo (void); /* in rtti.c */ /* A vector of all tinfo decls that haven't been emitted yet. 
*/ extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls; extern void init_rtti_processing (void); extern tree build_typeid (tree, tsubst_flags_t); extern tree get_tinfo_decl (tree); extern tree get_typeid (tree, tsubst_flags_t); extern tree build_headof (tree); extern tree build_dynamic_cast (tree, tree, tsubst_flags_t); extern void emit_support_tinfos (void); extern bool emit_tinfo_decl (tree); /* in search.c */ extern bool accessible_base_p (tree, tree, bool); extern tree lookup_base (tree, tree, base_access, base_kind *, tsubst_flags_t); extern tree dcast_base_hint (tree, tree); extern int accessible_p (tree, tree, bool); extern int accessible_in_template_p (tree, tree); extern tree lookup_field_1 (tree, tree, bool); extern tree lookup_field (tree, tree, int, bool); extern int lookup_fnfields_1 (tree, tree); extern tree lookup_fnfields_slot (tree, tree); extern tree lookup_fnfields_slot_nolazy (tree, tree); extern int class_method_index_for_fn (tree, tree); extern tree lookup_fnfields (tree, tree, int); extern tree lookup_member (tree, tree, int, bool, tsubst_flags_t); extern tree lookup_member_fuzzy (tree, tree, bool); extern int look_for_overrides (tree, tree); extern void get_pure_virtuals (tree); extern void maybe_suppress_debug_info (tree); extern void note_debug_info_needed (tree); extern void print_search_statistics (void); extern void reinit_search_statistics (void); extern tree current_scope (void); extern int at_function_scope_p (void); extern bool at_class_scope_p (void); extern bool at_namespace_scope_p (void); extern tree context_for_name_lookup (tree); extern tree lookup_conversions (tree); extern tree binfo_from_vbase (tree); extern tree binfo_for_vbase (tree, tree); extern tree look_for_overrides_here (tree, tree); #define dfs_skip_bases ((tree)1) extern tree dfs_walk_all (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree dfs_walk_once (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree 
binfo_via_virtual (tree, tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup (tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);

/* The representation of a deferred access check.  */

struct GTY(()) deferred_access_check {
  /* The base class in which the declaration is referenced.  */
  tree binfo;
  /* The declaration whose access must be checked.  */
  tree decl;
  /* The declaration that should be used in the error message.  */
  tree diag_decl;
  /* The location of this access.  */
  location_t loc;
};

/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *, tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree, tsubst_flags_t);

/* RAII sentinel to ensure that deferred access checks are popped before
   a function returns.
*/

struct deferring_access_check_sentinel
{
  /* On construction, start deferring access checks for the
     current scope.  */
  deferring_access_check_sentinel ()
  {
    push_deferring_access_checks (dk_deferred);
  }
  /* On destruction (including via early return or exception), pop
     the deferred-check level pushed by the constructor.  */
  ~deferring_access_check_sentinel ()
  {
    pop_deferring_access_checks ();
  }
};

extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern void finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_for_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void
finish_handler_parms (tree, tree); extern void finish_handler (tree); extern void finish_cleanup (tree, tree); extern bool is_this_parameter (tree); enum { BCS_NORMAL = 0, BCS_NO_SCOPE = 1, BCS_TRY_BLOCK = 2, BCS_FN_BODY = 4, BCS_TRANSACTION = 8 }; extern tree begin_compound_stmt (unsigned int); extern void finish_compound_stmt (tree); extern tree finish_asm_stmt (int, tree, tree, tree, tree, tree); extern tree finish_label_stmt (tree); extern void finish_label_decl (tree); extern cp_expr finish_parenthesized_expr (cp_expr); extern tree force_paren_expr (tree); extern tree maybe_undo_parenthesized_ref (tree); extern tree finish_non_static_data_member (tree, tree, tree); extern tree begin_stmt_expr (void); extern tree finish_stmt_expr_expr (tree, tree); extern tree finish_stmt_expr (tree, bool); extern tree stmt_expr_value_expr (tree); bool empty_expr_stmt_p (tree); extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *, tsubst_flags_t); extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool, bool, tsubst_flags_t); extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error); extern cp_expr finish_increment_expr (cp_expr, enum tree_code); extern tree finish_this_expr (void); extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t); extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr, tsubst_flags_t); extern tree finish_compound_literal (tree, tree, tsubst_flags_t); extern tree finish_fname (tree); extern void finish_translation_unit (void); extern tree finish_template_type_parm (tree, tree); extern tree finish_template_template_parm (tree, tree); extern tree begin_class_definition (tree); extern void finish_template_decl (tree); extern tree finish_template_type (tree, tree, int); extern tree finish_base_specifier (tree, tree, bool); extern void finish_member_declaration (tree); extern bool outer_automatic_var_p (tree); extern tree process_outer_var_ref (tree, tsubst_flags_t); extern cp_expr 
finish_id_expression (tree, tree, tree, cp_id_kind *, bool, bool, bool *, bool, bool, bool, bool, const char **, location_t); extern tree finish_typeof (tree); extern tree finish_underlying_type (tree); extern tree calculate_bases (tree); extern tree finish_bases (tree, bool); extern tree calculate_direct_bases (tree); extern tree finish_offsetof (tree, location_t); extern void finish_decl_cleanup (tree, tree); extern void finish_eh_cleanup (tree); extern void emit_associated_thunks (tree); extern void finish_mem_initializers (tree); extern tree check_template_template_default_arg (tree); extern bool expand_or_defer_fn_1 (tree); extern void expand_or_defer_fn (tree); extern void add_typedef_to_current_template_for_access_check (tree, tree, location_t); extern void check_accessibility_of_qualified_id (tree, tree, tree); extern tree finish_qualified_id_expr (tree, tree, bool, bool, bool, bool, tsubst_flags_t); extern void simplify_aggr_init_expr (tree *); extern void finalize_nrv (tree *, tree, tree); extern tree omp_reduction_id (enum tree_code, tree, tree); extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *); extern void cp_check_omp_declare_reduction (tree); extern void finish_omp_declare_simd_methods (tree); extern tree finish_omp_clauses (tree, bool, bool = false); extern tree push_omp_privatization_clauses (bool); extern void pop_omp_privatization_clauses (tree); extern void save_omp_privatization_clauses (vec<tree> &); extern void restore_omp_privatization_clauses (vec<tree> &); extern void finish_omp_threadprivate (tree); extern tree begin_omp_structured_block (void); extern tree finish_omp_structured_block (tree); extern tree finish_oacc_data (tree, tree); extern tree finish_oacc_host_data (tree, tree); extern tree finish_omp_construct (enum tree_code, tree, tree); extern tree begin_omp_parallel (void); extern tree finish_omp_parallel (tree, tree); extern tree begin_omp_task (void); extern tree finish_omp_task (tree, tree); extern tree 
finish_omp_for (location_t, enum tree_code, tree, tree, tree, tree, tree, tree, tree, vec<tree> *, tree); extern void finish_omp_atomic (enum tree_code, enum tree_code, tree, tree, tree, tree, tree, bool); extern void finish_omp_barrier (void); extern void finish_omp_flush (void); extern void finish_omp_taskwait (void); extern void finish_omp_taskyield (void); extern void finish_omp_cancel (tree); extern void finish_omp_cancellation_point (tree); extern tree omp_privatize_field (tree, bool); extern tree begin_transaction_stmt (location_t, tree *, int); extern void finish_transaction_stmt (tree, tree, int, tree); extern tree build_transaction_expr (location_t, tree, int, tree); extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool, bool); extern tree baselink_for_fns (tree); extern void finish_static_assert (tree, tree, location_t, bool); extern tree finish_decltype_type (tree, bool, tsubst_flags_t); extern tree finish_trait_expr (enum cp_trait_kind, tree, tree); extern tree build_lambda_expr (void); extern tree build_lambda_object (tree); extern tree begin_lambda_type (tree); extern tree lambda_capture_field_type (tree, bool); extern tree lambda_return_type (tree); extern tree lambda_proxy_type (tree); extern tree lambda_function (tree); extern void apply_deduced_return_type (tree, tree); extern tree add_capture (tree, tree, tree, bool, bool); extern tree add_default_capture (tree, tree, tree); extern tree build_capture_proxy (tree); extern void insert_capture_proxy (tree); extern void insert_pending_capture_proxies (void); extern bool is_capture_proxy (tree); extern bool is_normal_capture_proxy (tree); extern void register_capture_members (tree); extern tree lambda_expr_this_capture (tree, bool); extern tree maybe_resolve_dummy (tree, bool); extern tree current_nonlambda_function (void); extern tree nonlambda_method_basetype (void); extern tree current_nonlambda_scope (void); extern bool generic_lambda_fn_p (tree); extern void 
maybe_add_lambda_conv_op (tree); extern bool is_lambda_ignored_entity (tree); /* in tree.c */ extern int cp_tree_operand_length (const_tree); extern int cp_tree_code_length (enum tree_code); void cp_free_lang_data (tree t); extern tree force_target_expr (tree, tree, tsubst_flags_t); extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t); extern void lang_check_failed (const char *, int, const char *) ATTRIBUTE_NORETURN; extern tree stabilize_expr (tree, tree *); extern void stabilize_call (tree, tree *); extern bool stabilize_init (tree, tree *); extern tree add_stmt_to_compound (tree, tree); extern void init_tree (void); extern bool pod_type_p (const_tree); extern bool layout_pod_type_p (const_tree); extern bool std_layout_type_p (const_tree); extern bool trivial_type_p (const_tree); extern bool trivially_copyable_p (const_tree); extern bool scalarish_type_p (const_tree); extern bool type_has_nontrivial_default_init (const_tree); extern bool type_has_nontrivial_copy_init (const_tree); extern bool class_tmpl_impl_spec_p (const_tree); extern int zero_init_p (const_tree); extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree); extern bool check_abi_tag_args (tree, tree); extern tree strip_typedefs (tree, bool * = NULL); extern tree strip_typedefs_expr (tree, bool * = NULL); extern tree copy_binfo (tree, tree, tree, tree *, int); extern int member_p (const_tree); extern cp_lvalue_kind real_lvalue_p (const_tree); extern cp_lvalue_kind lvalue_kind (const_tree); extern bool lvalue_or_rvalue_with_address_p (const_tree); extern bool xvalue_p (const_tree); extern bool builtin_valid_in_constant_expr_p (const_tree); extern tree build_min (enum tree_code, tree, ...); extern tree build_min_nt_loc (location_t, enum tree_code, ...); extern tree build_min_non_dep (enum tree_code, tree, ...); extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...); extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *); extern 
tree build_cplus_new (tree, tree, tsubst_flags_t); extern tree build_aggr_init_expr (tree, tree); extern tree get_target_expr (tree); extern tree get_target_expr_sfinae (tree, tsubst_flags_t); extern tree build_cplus_array_type (tree, tree); extern tree build_array_of_n_type (tree, int); extern bool array_of_runtime_bound_p (tree); extern tree build_array_copy (tree); extern tree build_vec_init_expr (tree, tree, tsubst_flags_t); extern void diagnose_non_constexpr_vec_init (tree); extern tree hash_tree_cons (tree, tree, tree); extern tree hash_tree_chain (tree, tree); extern tree build_qualified_name (tree, tree, tree, bool); extern tree build_ref_qualified_type (tree, cp_ref_qualifier); extern int is_overloaded_fn (tree); extern tree dependent_name (tree); extern tree get_fns (tree); extern tree get_first_fn (tree); extern tree ovl_cons (tree, tree); extern tree build_overload (tree, tree); extern tree ovl_scope (tree); extern bool non_static_member_function_p (tree); extern const char *cxx_printable_name (tree, int); extern const char *cxx_printable_name_translate (tree, int); extern tree build_exception_variant (tree, tree); extern tree bind_template_template_parm (tree, tree); extern tree array_type_nelts_total (tree); extern tree array_type_nelts_top (tree); extern tree break_out_target_exprs (tree); extern tree build_ctor_subob_ref (tree, tree, tree); extern tree replace_placeholders (tree, tree); extern tree get_type_decl (tree); extern tree decl_namespace_context (tree); extern bool decl_anon_ns_mem_p (const_tree); extern tree lvalue_type (tree); extern tree error_type (tree); extern int varargs_function_p (const_tree); extern bool really_overloaded_fn (tree); extern bool cp_tree_equal (tree, tree); extern tree no_linkage_check (tree, bool); extern void debug_binfo (tree); extern tree build_dummy_object (tree); extern tree maybe_dummy_object (tree, tree *); extern int is_dummy_object (const_tree); extern const struct attribute_spec cxx_attribute_table[]; 
extern tree make_ptrmem_cst (tree, tree); extern tree cp_build_type_attribute_variant (tree, tree); extern tree cp_build_reference_type (tree, bool); extern tree move (tree); extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t); #define cp_build_qualified_type(TYPE, QUALS) \ cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error) extern bool cv_qualified_p (const_tree); extern tree cv_unqualified (tree); extern special_function_kind special_function_p (const_tree); extern int count_trees (tree); extern int char_type_p (tree); extern void verify_stmt_tree (tree); extern linkage_kind decl_linkage (tree); extern duration_kind decl_storage_duration (tree); extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn, void*, hash_set<tree> *); #define cp_walk_tree(tp,func,data,pset) \ walk_tree_1 (tp, func, data, pset, cp_walk_subtrees) #define cp_walk_tree_without_duplicates(tp,func,data) \ walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees) extern tree rvalue (tree); extern tree convert_bitfield_to_declared_type (tree); extern tree cp_save_expr (tree); extern bool cast_valid_in_integral_constant_expression_p (tree); extern bool cxx_type_hash_eq (const_tree, const_tree); extern void cxx_print_statistics (void); extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t); /* in ptree.c */ extern void cxx_print_xnode (FILE *, tree, int); extern void cxx_print_decl (FILE *, tree, int); extern void cxx_print_type (FILE *, tree, int); extern void cxx_print_identifier (FILE *, tree, int); extern void cxx_print_error_function (diagnostic_context *, const char *, struct diagnostic_info *); /* in typeck.c */ extern bool cxx_mark_addressable (tree); extern int string_conv_p (const_tree, const_tree, int); extern tree cp_truthvalue_conversion (tree); extern tree condition_conversion (tree); extern tree require_complete_type (tree); extern tree require_complete_type_sfinae (tree, tsubst_flags_t); extern tree complete_type (tree); 
extern tree complete_type_or_else (tree, tree); extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t); extern int type_unknown_p (const_tree); enum { ce_derived, ce_normal, ce_exact }; extern bool comp_except_specs (const_tree, const_tree, int); extern bool comptypes (tree, tree, int); extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree); extern bool compparms (const_tree, const_tree); extern int comp_cv_qualification (const_tree, const_tree); extern int comp_cv_qualification (int, int); extern int comp_cv_qual_signature (tree, tree); extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool); extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool); extern tree cxx_alignas_expr (tree); extern tree cxx_sizeof_nowarn (tree); extern tree is_bitfield_expr_with_lowered_type (const_tree); extern tree unlowered_expr_type (const_tree); extern tree decay_conversion (tree, tsubst_flags_t, bool = true); extern tree build_class_member_access_expr (cp_expr, tree, tree, bool, tsubst_flags_t); extern tree finish_class_member_access_expr (cp_expr, tree, bool, tsubst_flags_t); extern tree build_x_indirect_ref (location_t, tree, ref_operator, tsubst_flags_t); extern tree cp_build_indirect_ref (tree, ref_operator, tsubst_flags_t); extern tree build_array_ref (location_t, tree, tree); extern tree cp_build_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t); extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...) 
ATTRIBUTE_SENTINEL; extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **, tsubst_flags_t); extern tree build_x_binary_op (location_t, enum tree_code, tree, enum tree_code, tree, enum tree_code, tree *, tsubst_flags_t); extern tree build_x_array_ref (location_t, tree, tree, tsubst_flags_t); extern tree build_x_unary_op (location_t, enum tree_code, cp_expr, tsubst_flags_t); extern tree cp_build_addr_expr (tree, tsubst_flags_t); extern tree cp_build_unary_op (enum tree_code, tree, int, tsubst_flags_t); extern tree unary_complex_lvalue (enum tree_code, tree); extern tree build_x_conditional_expr (location_t, tree, tree, tree, tsubst_flags_t); extern tree build_x_compound_expr_from_list (tree, expr_list_kind, tsubst_flags_t); extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *, const char *, tsubst_flags_t); extern tree build_x_compound_expr (location_t, tree, tree, tsubst_flags_t); extern tree build_compound_expr (location_t, tree, tree); extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t); extern tree build_static_cast (tree, tree, tsubst_flags_t); extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t); extern tree build_const_cast (tree, tree, tsubst_flags_t); extern tree build_c_cast (location_t, tree, tree); extern cp_expr build_c_cast (location_t loc, tree type, cp_expr expr); extern tree cp_build_c_cast (tree, tree, tsubst_flags_t); extern cp_expr build_x_modify_expr (location_t, tree, enum tree_code, tree, tsubst_flags_t); extern tree cp_build_modify_expr (tree, enum tree_code, tree, tsubst_flags_t); extern tree convert_for_initialization (tree, tree, tree, int, impl_conv_rhs, tree, int, tsubst_flags_t); extern int comp_ptr_ttypes (tree, tree); extern bool comp_ptr_ttypes_const (tree, tree); extern bool error_type_p (const_tree); extern bool ptr_reasonably_similar (const_tree, const_tree); extern tree build_ptrmemfunc (tree, tree, int, bool, tsubst_flags_t); extern int cp_type_quals (const_tree); extern int 
type_memfn_quals (const_tree); extern cp_ref_qualifier type_memfn_rqual (const_tree); extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier); extern bool cp_has_mutable_p (const_tree); extern bool at_least_as_qualified_p (const_tree, const_tree); extern void cp_apply_type_quals_to_decl (int, tree); extern tree build_ptrmemfunc1 (tree, tree, tree); extern void expand_ptrmemfunc_cst (tree, tree *, tree *); extern tree type_after_usual_arithmetic_conversions (tree, tree); extern tree common_pointer_type (tree, tree); extern tree composite_pointer_type (tree, tree, tree, tree, composite_pointer_operation, tsubst_flags_t); extern tree merge_types (tree, tree); extern tree strip_array_domain (tree); extern tree check_return_expr (tree, bool *); extern tree cp_build_binary_op (location_t, enum tree_code, tree, tree, tsubst_flags_t); extern tree build_x_vec_perm_expr (location_t, tree, tree, tree, tsubst_flags_t); #define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true) extern tree build_simple_component_ref (tree, tree); extern tree build_ptrmemfunc_access_expr (tree, tree); extern tree build_address (tree); extern tree build_nop (tree, tree); extern tree non_reference (tree); extern tree lookup_anon_field (tree, tree); extern bool invalid_nonstatic_memfn_p (location_t, tree, tsubst_flags_t); extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t); extern tree convert_ptrmem (tree, tree, bool, bool, tsubst_flags_t); extern int lvalue_or_else (tree, enum lvalue_use, tsubst_flags_t); extern void check_template_keyword (tree); extern bool check_raw_literal_operator (const_tree decl); extern bool check_literal_operator_args (const_tree, bool *, bool *); extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t); extern tree cp_perform_integral_promotions (tree, tsubst_flags_t); extern tree finish_left_unary_fold_expr (tree, int); extern tree finish_right_unary_fold_expr (tree, int); extern tree finish_binary_fold_expr (tree, 
tree, int); /* in typeck2.c */ extern void require_complete_eh_spec_types (tree, tree); extern void cxx_incomplete_type_diagnostic (const_tree, const_tree, diagnostic_t); #undef cxx_incomplete_type_error extern void cxx_incomplete_type_error (const_tree, const_tree); #define cxx_incomplete_type_error(V,T) \ (cxx_incomplete_type_diagnostic ((V), (T), DK_ERROR)) extern void cxx_incomplete_type_inform (const_tree); extern tree error_not_base_type (tree, tree); extern tree binfo_or_else (tree, tree); extern void cxx_readonly_error (tree, enum lvalue_use); extern void complete_type_check_abstract (tree); extern int abstract_virtuals_error (tree, tree); extern int abstract_virtuals_error (abstract_class_use, tree); extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t); extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t); extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int); extern tree split_nonconstant_init (tree, tree); extern bool check_narrowing (tree, tree, tsubst_flags_t); extern tree digest_init (tree, tree, tsubst_flags_t); extern tree digest_init_flags (tree, tree, int, tsubst_flags_t); extern tree digest_nsdmi_init (tree, tree); extern tree build_scoped_ref (tree, tree, tree *); extern tree build_x_arrow (location_t, tree, tsubst_flags_t); extern tree build_m_component_ref (tree, tree, tsubst_flags_t); extern tree build_functional_cast (tree, tree, tsubst_flags_t); extern tree add_exception_specifier (tree, tree, int); extern tree merge_exception_specifiers (tree, tree); /* in mangle.c */ extern bool maybe_remove_implicit_alias (tree); extern void init_mangle (void); extern void mangle_decl (tree); extern const char *mangle_type_string (tree); extern tree mangle_typeinfo_for_type (tree); extern tree mangle_typeinfo_string_for_type (tree); extern tree mangle_vtbl_for_type (tree); extern tree mangle_vtt_for_type (tree); extern tree mangle_ctor_vtbl_for_type (tree, tree); extern tree mangle_thunk 
(tree, int, tree, tree); extern tree mangle_conv_op_name_for_type (tree); extern tree mangle_guard_variable (tree); extern tree mangle_tls_init_fn (tree); extern tree mangle_tls_wrapper_fn (tree); extern bool decl_tls_wrapper_p (tree); extern tree mangle_ref_init_variable (tree); extern char * get_mangled_vtable_map_var_name (tree); extern bool mangle_return_type_p (tree); /* in dump.c */ extern bool cp_dump_tree (void *, tree); /* In cp/cp-objcp-common.c. */ extern alias_set_type cxx_get_alias_set (tree); extern bool cxx_warn_unused_global_decl (const_tree); extern size_t cp_tree_size (enum tree_code); extern bool cp_var_mod_type_p (tree, tree); extern void cxx_initialize_diagnostics (diagnostic_context *); extern int cxx_types_compatible_p (tree, tree); extern void init_shadowed_var_for_decl (void); extern bool cxx_block_may_fallthru (const_tree); /* in cp-gimplify.c */ extern int cp_gimplify_expr (tree *, gimple_seq *, gimple_seq *); extern void cp_genericize (tree); extern bool cxx_omp_const_qual_no_mutable (tree); extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree); extern tree cxx_omp_clause_default_ctor (tree, tree, tree); extern tree cxx_omp_clause_copy_ctor (tree, tree, tree); extern tree cxx_omp_clause_assign_op (tree, tree, tree); extern tree cxx_omp_clause_dtor (tree, tree); extern void cxx_omp_finish_clause (tree, gimple_seq *); extern bool cxx_omp_privatize_by_reference (const_tree); extern bool cxx_omp_disregard_value_expr (tree, bool); extern void cp_fold_function (tree); extern tree cp_fully_fold (tree); extern void clear_fold_cache (void); /* in name-lookup.c */ extern void suggest_alternatives_for (location_t, tree); extern tree strip_using_decl (tree); /* in constraint.cc */ extern void init_constraint_processing (); extern bool constraint_p (tree); extern tree conjoin_constraints (tree, tree); extern tree conjoin_constraints (tree); extern tree get_constraints (tree); extern void set_constraints (tree, tree); extern void 
remove_constraints (tree); extern tree current_template_constraints (void); extern tree associate_classtype_constraints (tree); extern tree build_constraints (tree, tree); extern tree get_shorthand_constraints (tree); extern tree build_concept_check (tree, tree, tree = NULL_TREE); extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE); extern tree make_constrained_auto (tree, tree); extern void placeholder_extract_concept_and_args (tree, tree&, tree&); extern bool equivalent_placeholder_constraints (tree, tree); extern hashval_t hash_placeholder_constraint (tree); extern bool deduce_constrained_parameter (tree, tree&, tree&); extern tree resolve_constraint_check (tree); extern tree check_function_concept (tree); extern tree finish_template_introduction (tree, tree); extern bool valid_requirements_p (tree); extern tree finish_concept_name (tree); extern tree finish_shorthand_constraint (tree, tree); extern tree finish_requires_expr (tree, tree); extern tree finish_simple_requirement (tree); extern tree finish_type_requirement (tree); extern tree finish_compound_requirement (tree, tree, bool); extern tree finish_nested_requirement (tree); extern void check_constrained_friend (tree, tree); extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree); extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree); extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree); extern bool function_concept_check_p (tree); extern tree normalize_expression (tree); extern tree expand_concept (tree, tree); extern bool expanding_concept (); extern tree evaluate_constraints (tree, tree); extern tree evaluate_function_concept (tree, tree); extern tree evaluate_variable_concept (tree, tree); extern tree evaluate_constraint_expression (tree, tree); extern bool constraints_satisfied_p (tree); extern bool constraints_satisfied_p (tree, tree); extern tree lookup_constraint_satisfaction (tree, tree); extern tree memoize_constraint_satisfaction (tree, 
tree, tree); extern tree lookup_concept_satisfaction (tree, tree); extern tree memoize_concept_satisfaction (tree, tree, tree); extern tree get_concept_expansion (tree, tree); extern tree save_concept_expansion (tree, tree, tree); extern bool* lookup_subsumption_result (tree, tree); extern bool save_subsumption_result (tree, tree, bool); extern bool equivalent_constraints (tree, tree); extern bool equivalently_constrained (tree, tree); extern bool subsumes_constraints (tree, tree); extern bool strictly_subsumes (tree, tree); extern int more_constrained (tree, tree); extern void diagnose_constraints (location_t, tree, tree); /* in logic.cc */ extern tree decompose_conclusions (tree); extern bool subsumes (tree, tree); /* in vtable-class-hierarchy.c */ extern void vtv_compute_class_hierarchy_transitive_closure (void); extern void vtv_generate_init_routine (void); extern void vtv_save_class_info (tree); extern void vtv_recover_class_info (void); extern void vtv_build_vtable_verify_fndecl (void); /* In cp-cilkplus.c. 
*/ extern bool cpp_validate_cilk_plus_loop (tree); /* In cp/cp-array-notations.c */ extern tree expand_array_notation_exprs (tree); bool cilkplus_an_triplet_types_ok_p (location_t, tree, tree, tree, tree); /* In constexpr.c */ extern void fini_constexpr (void); extern bool literal_type_p (tree); extern tree register_constexpr_fundef (tree, tree); extern bool check_constexpr_ctor_body (tree, tree, bool); extern tree ensure_literal_type_for_constexpr_object (tree); extern bool potential_constant_expression (tree); extern bool potential_nondependent_constant_expression (tree); extern bool potential_nondependent_static_init_expression (tree); extern bool potential_static_init_expression (tree); extern bool potential_rvalue_constant_expression (tree); extern bool require_potential_constant_expression (tree); extern bool require_potential_rvalue_constant_expression (tree); extern tree cxx_constant_value (tree, tree = NULL_TREE); extern tree maybe_constant_value (tree, tree = NULL_TREE); extern tree maybe_constant_init (tree, tree = NULL_TREE); extern tree fold_non_dependent_expr (tree); extern tree fold_simple (tree); extern bool is_sub_constant_expr (tree); extern bool reduced_constant_expression_p (tree); extern bool is_instantiation_of_constexpr (tree); extern bool var_in_constexpr_fn (tree); extern void explain_invalid_constexpr_fn (tree); extern vec<tree> cx_error_context (void); extern tree fold_sizeof_expr (tree); extern void clear_cv_and_fold_caches (void); /* In c-family/cilk.c */ extern bool cilk_valid_spawn (tree); /* In cp-ubsan.c */ extern void cp_ubsan_maybe_instrument_member_call (tree); extern void cp_ubsan_instrument_member_accesses (tree *); extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree); extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree); extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree); /* -- end of C++ */ #endif /* ! GCC_CP_TREE_H */
unified_shared_memory.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu #include <stdio.h> // The runtime considers unified shared memory to be always present. #pragma omp requires unified_shared_memory int main() { int i; // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: i) #pragma omp target map(present, alloc: i) ; // CHECK: i is present fprintf(stderr, "i is present\n"); // CHECK-NOT: Libomptarget #pragma omp target map(present, alloc: i) ; // CHECK: is present fprintf(stderr, "i is present\n"); return 0; }
chap_fmt_plug.c
/*
 * iSCSI CHAP authentication cracker. Hacked together during September of 2012
 * by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Input Format : CHAP_N(username):$chap$id*challenge*response
 *
 * References:
 *
 * ftp://ftp.samba.org/pub/unpacked/ppp/pppd/chap-md5.c
 * http://www.blackhat.com/presentations/bh-usa-05/bh-us-05-Dwivedi-update.pdf
 * http://www.willhackforsushi.com/presentations/PEAP_Shmoocon2008_Wright_Antoniewicz.pdf
 *
 * https://tools.ietf.org/html/rfc2865 -> The CHAP challenge value is found in
 * the CHAP-Challenge Attribute (60) if present in the packet, otherwise in the
 * Request Authenticator field.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_chap;
#elif FMT_REGISTERS_H
john_register_one(&fmt_chap);
#else

#include <string.h>

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 65536 // core i7 no HT
#endif
#endif
#endif

#include "arch.h"
#include "misc.h"
#include "md5.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL "chap"
#define FORMAT_NAME "iSCSI CHAP authentication / EAP-MD5"
#define FORMAT_TAG "$chap$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#define SALT_SIZE sizeof(struct custom_salt)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* Self-test vectors: "$chap$<id>*<hex challenge>*<hex response>" / plaintext. */
static struct fmt_tests chap_tests[] = {
	{"$chap$0*cc7e5247514551acdcbf782c4027bfb1*fdfdad5277812ae40956a66f3db23308", "password"},
	{"$chap$0*81a49cb700e8c2ee9bc3852a506406c3*8876e228962a999637eecc2423f55f07", "password"},
	{"$chap$0*e270954e7d84f99535dce2e5d7340a7d*4d64f587c7b5248406b939e1e9abeb74", "bar"},
	// EAP-MD5 hashes are also supported!
	{"$chap$2*d7ec2fff2ada437f9dcd4e3b0df44d50*1ffc6c2659bc5bb94144fd01eb756e37", "beaVIs"},
	{"$chap$2*00000000000000000000000000000000*9920418b3103652d3b80ffff04da5863", "bradtest"},
	// RADIUS EAP-MD5 hash
	{"$chap$1*266b0e9a58322f4d01ab25b35f879464*c9f9769597e320843f5f2af7b8f1c9bd", "S0cc3r"},
	// RADIUS CHAP authentication is supported too
	{"$chap$238*98437c9fd4cb5f446202c0b1ffab2592*050d578a292a4bfd9f030d2797919687", "hello"},
	{NULL}
};

/* Per-candidate plaintext keys and the MD5 responses computed from them. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

/* Parsed salt: the CHAP identifier plus the (binary) challenge. */
static struct custom_salt {
	unsigned char id; /* CHAP_I */
	unsigned char challenge[32]; /* CHAP_C */
	int challenge_length;
} *cur_salt;

/*
 * Allocate the key/response buffers; with OpenMP, scale the keys-per-crypt
 * parameters by thread count times OMP_SCALE first.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}

/* Release the buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/*
 * Sanity-check one ciphertext line: "$chap$" tag, a decimal id, an even-length
 * hex challenge of at most 64 hex digits (32 bytes), and a hex response of
 * exactly BINARY_SIZE bytes.  Returns 1 if valid, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int len, extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	/* strtokm mutates its input, so work on a copy. */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL) /* id */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* challenge */
		goto err;
	len = strlen(p);
	if (len > 64 || (len&1))
		goto err;
	if (hexlenl(p, &extra) != len || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL) /* binary */
		goto err;
	if (hexlenl(p, &extra) != BINARY_SIZE*2 || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/*
 * Parse id and challenge out of a (pre-validated) ciphertext into a static
 * custom_salt.  The returned pointer is to static storage, as the fmt API
 * expects; the caller copies it.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN; /* skip over "$chap$" */
	p = strtokm(ctcopy, "*");
	cs.id = atoi(p);
	p = strtokm(NULL, "*");
	cs.challenge_length = strlen(p) / 2;
	/* Decode the hex challenge into raw bytes. */
	for (i = 0; i < cs.challenge_length; i++)
		cs.challenge[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Decode the hex response (the last '*'-separated field) into binary. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out; /* CHAP_R */
}

#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/*
 * Compute the CHAP response MD5(id || key || challenge) for every candidate
 * key, in parallel when built with OpenMP.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;
		MD5_Init(&ctx);
		MD5_Update(&ctx, &cur_salt->id, 1);
		MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		MD5_Update(&ctx, cur_salt->challenge, cur_salt->challenge_length);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

/* Quick scan: does any computed response match on its first 32 bits? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

/* Full comparison of one candidate's response against the target binary. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* cmp_one already compares all BINARY_SIZE bytes, so nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static void chap_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format descriptor wiring the functions above into the john core. */
struct fmt_main fmt_chap = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		chap_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		chap_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
pi_omp_private.c
/* * Tecnologico de Costa Rica (www.tec.ac.cr) * Course: MP-6171 High Performance Embedded Systems * Developers Name: Verny Morales and Luis Carlos Alvarez * Developers email: verny.morales@gmail.com and lcam03@gmail.com * General purpose: * Input: * Output: * */ //gcc -fopenmp pi_omp_private.c -o pi_omp_private //./pi_omp_private #include <omp.h> #include <stdio.h> static long num_steps = 1000000000; double step; void main () { int i; double x, pi, sum = 0.0; double start_time, run_time; int num_threads = 0; step = 1.0/(double) num_steps; start_time = omp_get_wtime(); //Creates a team of OpenMP threads that execute the region #pragma omp parallel { //reduction -> Specifies a reduction-identifier and one or more list items // In order to specify the reduction in OpenMP, we must provide // an operation (+ / * / o) // and a reduction variable //private -> Declares list items to be private to a task #pragma omp for reduction(+:sum) private(x) for (i=0; i< num_steps; i++){ x = (i+0.5)*step; sum = sum + 4.0/(1.0+x*x); } //num_threads = omp_get_num_threads(); } pi = step * sum; run_time = omp_get_wtime() - start_time; printf("pi with %ld steps is %lf in %lf seconds\n", num_steps, pi,run_time); printf("Number of threads = %d\n", num_threads); }
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include 
"clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class 
ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class 
VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;

namespace sema {
  class AccessedEntity;
  class BlockScopeInfo;
  class Capture;
  class CapturedRegionScopeInfo;
  class CapturingScopeInfo;
  class CompoundScopeInfo;
  class DelayedDiagnostic;
  class DelayedDiagnosticPool;
  class FunctionScopeInfo;
  class LambdaScopeInfo;
  class PossiblyUnreachableDiag;
  class SemaPPCallbacks;
  class TemplateDeductionInfo;
}

namespace threadSafety {
  class BeforeSet;
  void threadSafetyCleanup(BeforeSet* Cache);
}

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Look up (creating if needed) the nullability record for \p file,
  /// consulting the one-entry cache first; on a miss, the previously cached
  /// entry is written back to the map before being replaced.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Return the expected type for the token at \p Tok, or a null QualType
  /// when the stored expectation is for a different location.  An eagerly
  /// stored Type takes precedence over the lazy ComputeType callback.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. 
static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. 
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<unsigned> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FPOptionsOverride(FpPragmaStack.CurrentValue); } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. 
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. 
std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. 
Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. 
std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  /// Enter \p DC (a FunctionDecl or ObjCMethodDecl) as the current context,
  /// push a function scope and a potentially-evaluated expression context,
  /// and mark the function as about to receive a body.
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Record a "while defining synthesized function" note pointing at
  /// \p UseLoc. May be called at most once per scope.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  /// Unwind everything pushed by the constructor (and addContextNote, if it
  /// was called), in reverse order.
  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. Rare; may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;

// Maps identifiers to the declarations they currently resolve to.
IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;

/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;

/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;

/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;

/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;

/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;

/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr*, 2> VolatileAssignmentLHSs;

  /// Set of candidates for starting an immediate invocation.
  llvm::SmallVector<ImmediateInvocationCandidate, 4>
      ImmediateInvocationCandidates;

  /// Set of DeclRefExprs referencing a consteval function when used in a
  /// context not already known to be immediately invoked.
  llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

  /// True for all three unevaluated-operand contexts.
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  // A deleted method is recorded as NoMemberOrDeleted rather than Success.
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ?
             NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// Folding-set entry wrapping a SpecialMemberOverloadResult for caching.
class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                         public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
  /// This is not a defaultable comparison operator.
  None,
  /// This is an operator== that should be implemented as a series of
  /// subobject comparisons.
  Equal,
  /// This is an operator<=> that should be implemented as a series of
  /// subobject comparisons.
  ThreeWay,
  /// This is an operator!= that should be implemented as a rewrite in terms
  /// of a == comparison.
  NotEqual,
  /// This is an <, <=, >, or >= that should be implemented as a rewrite in
  /// terms of a <=> comparison.
  Relational,
};

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
  FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
    OldOverrides = S.FpPragmaStack.CurrentValue;
  }
  ~FPFeaturesStateRAII() {
    S.CurFPFeatures = OldFPFeaturesState;
    S.FpPragmaStack.CurrentValue = OldOverrides;
  }
  unsigned getOverrides() { return OldOverrides; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
  unsigned OldOverrides;
};

void addImplicitTypedef(StringRef Name, QualType T);

// Set once the "stack nearly exhausted" warning has been issued, so it is
// emitted at most once.
bool WarnedStackExhausted = false;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);

/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
                                 llvm::function_ref<void()> Fn);

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                           unsigned Index);

void emitAndClearUnusedLocalTypedefWarnings();

private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
// Emit all deferred diagnostics.
void emitDeferredDiags();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K,
                             unsigned OpenMPCaptureLevel = 0);

/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

// Innermost function scope, or null when no function scope is active.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ?
      nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
                                             unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
                         SourceLocation AttrLoc);

QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T, SourceLocation Loc);
QualType BuildWritePipeType(QualType T, SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
                                     SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const PartialDiagnostic &NoThrowDiagID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
/// Abstract callback used when a type must be diagnosed as incomplete (or
/// otherwise unsuitable); subclasses decide which diagnostic to emit.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  /// Emit the diagnostic for type \p T at location \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// Overload set that normalizes an arbitrary bound argument into something the
// diagnostic builder can stream; used by BoundTypeDiagnoser::emit below.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// Locations and expressions are printed as source ranges.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// A TypeDiagnoser that captures a diagnostic ID plus a tuple of extra
/// arguments at construction time; diagnose() replays the arguments into the
/// diagnostic builder in order, followed by the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
  unsigned DiagID;
  // NOTE: stores references to the caller's arguments; the diagnoser must not
  // outlive them.
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};

/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g.
/// <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc,
                       const IdentifierInfo *AttrName, bool IsAsync);

/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    // Unlike the base class, stream the incomplete/sizeless selector before
    // the type (see the class doc comment for the expected %select form).
    DB << T->isSizelessType() << T;
  }
};

/// How strictly to treat sizeless built-in types when checking for type
/// completeness.
enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed.
For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. 
bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. 
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... 
Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. 
NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as a non-type, and an expression representing /// that name has been formed. NC_ContextIndependentExpr, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification ContextIndependentExpr(ExprResult E) { NameClassification Result(NC_ContextIndependentExpr); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { 
NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_ContextIndependentExpr); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. 
/// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. 
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, 
NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult 
&Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current lanugage mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void 
ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. 
/// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. 
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. 
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. 
void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl 
*CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. 
unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. 
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. 
void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. 
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const 
SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name, bool Override); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void 
maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. 
enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. 
virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. 
// TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( 
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& 
CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. 
/// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different 
possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - 
Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. 
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up a name following ~ in a destructor name. This is an ordinary
  /// lookup, but prefers tags to typedefs.
  LookupDestructorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations.  This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};

/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};

/// Picks the redeclaration-lookup kind appropriate for the current context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

// Callback types used by delayed typo correction (below).
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

// Per-TypoExpr bookkeeping: the candidate consumer plus the diagnostic and
// recovery callbacks registered for that typo.
struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC, SourceLocation TypoLoc);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an extenal
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration.  Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc,
                            LookupNameKind NameKind,
                            RedeclarationKind Redecl = NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl
                                   = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  QualType T1, QualType T2,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

// Lookup helpers for the C++ special member functions of a class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing);
bool isKnownName(StringRef name);

/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
  Emitted,
  CUDADiscarded,     // Discarded due to CUDA/HIP hostness
  OMPDiscarded,      // Discarded due to OpenMP hostness
  TemplateDiscarded, // Discarded due to uninstantiated templates
  Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
                                         bool Final = false);

// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                             ArrayRef<Expr *> Args,
                             ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool IncludeDependentBases = false,
                        bool LoadExternal = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};

TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind,
                           Scope *S, CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           CorrectTypoKind Mode,
                           DeclContext *MemberContext = nullptr,
                           bool EnteringContext = false,
                           const ObjCObjectPointerType *OPT = nullptr,
                           bool RecordFailure = true);

TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             TypoDiagnosticGenerator TDG,
                             TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr);

/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
    Expr *E, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; });

// Convenience overload: forwards to the Expr* form unless ER is already
// invalid, in which case the error is propagated unchanged.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl,
                                         RecoverUncorrectedTypos, Filter);
}

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  const PartialDiagnostic &PrevNote,
                  bool ErrorRecovery = true);

void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                        ArrayRef<Expr *> Args,
                                   AssociatedNamespaceSet &AssociatedNamespaces,
                                   AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                          bool ConsiderLinkage, bool AllowInlineNamespace);

bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
                              ArrayRef<Expr *> SubExprs,
                              QualType T = QualType());

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
                            SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                               bool ForRedeclaration, SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
    FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D,
                              const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);

/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

// Validation helpers for individual attributes; each returns/reports
// whether the parsed attribute is well-formed in its context.
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly.  This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                            SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
                                           NullabilityKind nullability,
                                           SourceLocation diagLoc,
                                           bool allowArrayTypes,
                                           bool overrideExisting);

/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                 const ParsedAttributesView &Attrs,
                                 SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
                           ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implelementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// it property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                             const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                 SourceLocation AtLoc,
                                                 SourceLocation LParenLoc,
                                                 FieldDeclarator &FD,
                                                 Selector GetterSel,
                                                 SourceLocation GetterNameLoc,
                                                 Selector SetterSel,
                                                 SourceLocation SetterNameLoc,
                                                 const bool isReadWrite,
                                                 unsigned &Attributes,
                                                 const unsigned AttributesAsWritten,
                                                 QualType T,
                                                 TypeSourceInfo *TSI,
                                                 tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
                                        const ObjCImplementationDecl *ImplD,
                                        const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declaraed in interface or
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See descriptoin of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                               SourceRange R, bool receiverIdOrClass,
                               SmallVectorImpl<ObjCMethodDecl*>& Methods);

void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                   Selector Sel, SourceRange R,
                                   bool receiverIdOrClass);

private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaraation for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                 QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.

public:
/// Wrapper for an expression that has been run through
/// ActOnFinishFullExpr, used to hand fully-checked expressions to the
/// statement-building actions below.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  // Default the cleanup location to the expression's own location (or an
  // invalid location if Arg is null).
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  // When true, the destructor pops the current FunctionScopeInfo; disable()
  // clears it so the scope is intentionally kept alive past this object.
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                               ArrayRef<const Attr*> Attrs,
                               Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  Stmt *InitStmt,
                                  ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                 Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
                          ConditionResult Cond, SourceLocation RParenLoc,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
                        SourceLocation LParenLoc,
                        Stmt *First,
                        ConditionResult Second,
                        FullExprArg Third,
                        SourceLocation RParenLoc,
                        Stmt *Body);
ExprResult
CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum 
CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool 
Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

/// Begin a region in which diagnostics for the declaration being parsed are
/// deferred into \p pool (via DelayedDiagnostics) until the matching
/// PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
/// Enter a class-parsing context: increments ParsingClassDepth and saves the
/// current undelayed-diagnostic state for restoration by PopParsingClass.
ParsingClassState PushParsingClass() {
  ParsingClassDepth++;
  return DelayedDiagnostics.pushUndelayed();
}
/// Leave the class-parsing context entered by the matching PushParsingClass.
void PopParsingClass(ParsingClassState state) {
  ParsingClassDepth--;
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, 
const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, 
SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand); ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType Ty); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, Expr *E); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult 
CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * 
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation 
RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". 
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
/// One member-designator in a __builtin_offsetof component chain:
/// either a field access (.ident) or an array subscript ([expr]).
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo; // valid when !isBrackets (.ident form)
    Expr *E;                   // valid when isBrackets ([expr] form)
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                TypeSourceInfo *TInfo,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc,
                                SourceLocation TypeLoc,
                                ParsedType ParsedArgTy,
                                ArrayRef<OffsetOfComponent> Components,
                                SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr,
                           Expr *LHSExpr, Expr *RHSExpr,
                           SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                      SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                          TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
                              SourceLocation BuiltinLoc,
                              SourceLocation RPLoc);

// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. 
^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. 
DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool 
CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. 
/// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. 
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Drop the collected exception set; used once the computed specification
  // no longer tracks individual exception types.
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Pre-C++11 there is no noexcept; start from an empty dynamic spec.
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E) { CalledStmt(E); }

  /// Integrate an invoked statement into the collected data.
  void CalledStmt(Stmt *S);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. 
/// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. 
/// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. 
bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. 
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  // Saved value of S.CXXThisTypeOverride, restored by the destructor.
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). 
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenOrBraceLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenOrBraceLoc,
                                     bool ListInitialization);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc,
                                     bool ListInitialization);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens,
                       QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Optional<Expr *> ArraySize,
                       SourceRange DirectInitRange,
                       Expr *Initializer);

/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;

/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
                                          SourceLocation Loc);

bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);

/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};

/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. 
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               ParsedType LhsTy, Expr *DimExpr,
                               SourceLocation RParen);

ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo, Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                Expr *Queried, SourceLocation RParen);

ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc,
                                Expr *Queried, SourceLocation RParen);

ExprResult ActOnStartCXXMemberReference(Scope *S,
                                        Expr *Base,
                                        SourceLocation OpLoc,
                                        tok::TokenKind OpKind,
                                        ParsedType &ObjectType,
                                        bool &MayBePseudoDestructor);

ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     const CXXScopeSpec &SS,
                                     TypeSourceInfo *ScopeType,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     PseudoDestructorTypeStorage DestroyedType);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     CXXScopeSpec &SS,
                                     UnqualifiedId &FirstTypeName,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     UnqualifiedId &SecondTypeName);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     SourceLocation TildeLoc,
                                     const DeclSpec& DS);

/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
                               bool BoundToLvalueReference);

/// Convenience overload: finish the full-expression using the
/// expression's own location (or an invalid location for a null Expr).
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  return ActOnFinishFullExpr(
      Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                               bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);

// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);

DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                              SourceLocation ColonColonLoc,
                              CXXScopeSpec &SS);

bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                     bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;

  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;

  /// The location of the identifier.
  SourceLocation IdentifierLoc;

  /// The location of the '::'.
  SourceLocation CCLoc;

  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {
  }

  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};

bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                  NestedNameSpecInfo &IdInfo);

bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext, CXXScopeSpec &SS,
                                 NamedDecl *ScopeLookupResult,
                                 bool ErrorRecoveryLookup,
                                 bool *IsCorrectedToColon = nullptr,
                                 bool OnlyNamespace = false);

/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. 
/// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. 
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
/// Parser-facing wrapper around buildLambdaInitCaptureInitialization that
/// returns the deduced init-capture type as a ParsedType.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        SourceLocation EllipsisLoc,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc);

/// Introduce the lambda parameters into scope.
void addLambdaParameters(
    ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
    CXXMethodDecl *CallOperator, Scope *CurScope);

/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion(
    SourceLocation CurrentLoc, CXXConversionDecl *Conv);

/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                  CXXConversionDecl *Conv);

ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                         SourceLocation ConvLocation,
                                         CXXConversionDecl *Conv, Expr *Src);

/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
                               bool *PossibleNonPrimary = nullptr,
                               bool IsTrailingRequiresClause = false);

private:
  /// Caches pairs of template-like decls whose associated constraints were
  /// checked for subsumption and whether or not the first's constraints did
  /// in fact subsume the second's.
  llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
  /// Caches the normalized associated constraints of declarations (concepts
  /// or constrained declarations). If an error occurred while normalizing the
  /// associated constraints of the template or concept, nullptr will be
  /// cached here.
  llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

  /// Folding set caching ConstraintSatisfaction nodes, keyed within the
  /// current ASTContext.
  llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
      SatisfactionCache;

public:
  const NormalizedConstraint *
  getNormalizedAssociatedConstraints(
      NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);

  /// \brief Check whether the given declaration's associated constraints are
  /// at least as constrained as another declaration's according to the
  /// partial ordering of constraints.
  ///
  /// \param Result If no error occurred, receives the result of true if D1 is
  /// at least as constrained as D2, and false otherwise.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                              NamedDecl *D2, ArrayRef<const Expr *> AC2,
                              bool &Result);

  /// If D1 was not at least as constrained as D2, but would've been if a pair
  /// of atomic constraints involved had been declared in a concept and not
  /// repeated in two separate places in code.
  /// \returns true if such a diagnostic was emitted, false otherwise.
  bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
      ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);

  /// \brief Check whether the given list of constraint expressions are
  /// satisfied (as if in a 'conjunction') given template arguments.
  /// \param Template the template-like entity that triggered the constraints
  /// check (either a concept or a constrained entity).
  /// \param ConstraintExprs a list of constraint expressions, treated as if
  /// they were 'AND'ed together.
  /// \param TemplateArgs the list of template arguments to substitute into the
  /// constraint expression.
  /// \param TemplateIDRange The source range of the template id that
  /// caused the constraints check.
  /// \param Satisfaction if true is returned, will contain details of the
  /// satisfaction, with enough information to diagnose an unsatisfied
  /// expression.
/// \returns true if an error occurred and satisfaction could not be checked, /// false otherwise. bool CheckConstraintSatisfaction( const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction); /// \brief Check whether the given non-dependent constraint expression is /// satisfied. Returns false and updates Satisfaction with the satisfaction /// verdict if successful, emits a diagnostic and returns true if an error /// occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckConstraintSatisfaction(const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction); /// Check whether the given function decl's trailing requires clause is /// satisfied, if any. Returns false and updates Satisfaction with the /// satisfaction verdict if successful, emits a diagnostic and returns true if /// an error occured and satisfaction could not be determined. /// /// \returns true if an error occurred, false otherwise. bool CheckFunctionConstraints(const FunctionDecl *FD, ConstraintSatisfaction &Satisfaction, SourceLocation UsageLoc = SourceLocation()); /// \brief Ensure that the given template arguments satisfy the constraints /// associated with the given template, emitting a diagnostic if they do not. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateArgs The converted, canonicalized template arguments. /// /// \param TemplateIDRange The source range of the template id that /// caused the constraints check. /// /// \returns true if the constrains are not satisfied or could not be checked /// for satisfaction, false if the constraints are satisfied. 
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. /// \param First whether this is the first time an unsatisfied constraint is /// diagnosed for this error. void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied. void DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction, bool First = true); /// \brief Emit diagnostics explaining why a constraint expression was deemed /// unsatisfied because it was ill-formed. void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation, StringRef Diagnostic); void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old, SourceLocation New); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
                                            CXXRecordDecl *Record);

/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
    SourceLocation Location, CXXRecordDecl *ClassDecl,
    llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);

/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
                                    CXXDestructorDecl *Dtor);

/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;

/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();

/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
                    bool DefinitionRequired = false);

/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                           const CXXRecordDecl *RD);

/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
                                  bool ConstexprOnly = false);

/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();

void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc,
                          ArrayRef<CXXCtorInitializer*> MemInits,
                          bool AnyErrors);

/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

void referenceDLLExportedClassMethods();

void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec,
    SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Decl *Template, llvm::function_ref<Scope *()> EnterScope); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl 
*ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD); bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM); void CheckDelayedMemberExceptionSpecs(); bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD, DefaultedComparisonKind DCK); void DeclareImplicitEqualityComparison(CXXRecordDecl *RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, 
CXXRecordDecl *NamingClass,
                        QualType BaseType);

bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found, QualType ObjectType,
                                   SourceLocation Loc,
                                   const PartialDiagnostic &Diag);
// Convenience overload: forwards to the five-argument form above with an
// invalid location and a default-constructed diagnostic.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found, QualType ObjectType) {
  return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                       SourceLocation(), PDiag());
}

void HandleDependentAccessCheck(
    const DependentDiagnostic &DD,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(
    const DeclContext *Pattern,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

// Selector values naming the context of an "abstract type" diagnostic;
// AbstractNone (-1) means no specific context applies.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);
template <typename...
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. 
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}

  // Location of the 'template' keyword when one was present; invalid
  // location otherwise.
  SourceLocation getTemplateKeywordLoc() const {
    return TemplateKW.getValueOr(SourceLocation());
  }
  bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
  bool isRequired() const { return TemplateKW != SourceLocation(); }
  explicit operator bool() const { return isRequired(); }

private:
  llvm::Optional<SourceLocation> TemplateKW;
};

enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
  FoundFunctions,
};

bool LookupTemplateName(
    LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
    bool EnteringContext, bool &MemberOfUnknownSpecialization,
    RequiredTemplateKind RequiredTemplate = SourceLocation(),
    AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);

TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS,
                                bool hasTemplateKeyword,
                                const UnqualifiedId &Name,
                                ParsedType ObjectType, bool EnteringContext,
                                TemplateTy &Template,
                                bool &MemberOfUnknownSpecialization,
                                bool Disambiguation = false);

/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
                                     TemplateNameKind &TNK,
                                     SourceLocation NameLoc,
                                     IdentifierInfo *&II);

bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
                                      SourceLocation NameLoc,
                                      bool Diagnose = true);

/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation 
EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. 
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, 
SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, 
LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. 
/// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \param ConstraintsNotSatisfied If provided, and an error occured, will /// receive true if the cause for the error is the associated constraints of /// the template not being satisfied by the template arguments. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, 
SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const 
TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic 
*SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block, /// A type constraint, UPPC_TypeConstraint }; /// Diagnose unexpanded parameter packs. 
/// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. 
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). 
TDK_InstantiationDepth,

/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,

/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,

/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,

/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,

/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,

/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,

/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,

/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,

/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,

/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,

/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,

/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,

/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,

/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,

/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                  unsigned ArgIdx, QualType OriginalArgType)
      : OriginalParamType(OriginalParamType),
        DecomposedParam(DecomposedParam),
        ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {}

  QualType OriginalParamType;
  bool DecomposedParam;
  unsigned ArgIdx;
  QualType OriginalArgType;
};

TemplateDeductionResult FinishTemplateArgumentDeduction(
    FunctionTemplateDecl *FunctionTemplate,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
    sema::TemplateDeductionInfo &Info,
    SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
    bool PartialOverloading = false,
    llvm::function_ref<bool()> CheckNonDependent = []{ return false; });

TemplateDeductionResult DeduceTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
    bool PartialOverloading,
    llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void 
MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. 
ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are instantiating a requirement of a requires expression. RequirementInstantiation, /// We are checking the satisfaction of a nested requirement of a requires /// expression. NestedRequirementConstraintsCheck, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are declaring an implicit 'operator==' for a defaulted /// 'operator<=>'. DeclaringImplicitEqualityComparison, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, // We are checking the constraints associated with a constrained entity or // the constraint expression of a concept. This includes the checks that // atomic constraints have the type 'bool' and that they can be constant // evaluated. ConstraintsCheck, // We are substituting template arguments into a constraint expression. ConstraintSubstitution, // We are normalizing a constraint expression. 
ConstraintNormalization, // We are substituting into the parameter mapping of an atomic constraint // during normalization. ParameterMappingSubstitution, /// We are rewriting a comparison operator in terms of an operator<=>. RewritingOperatorAsSpaceship, /// We are initializing a structured binding. InitializingStructuredBinding, /// We are marking a class as __dllexport. MarkingClassDllexported, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. 
sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that caused
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating
/// a template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    // Restore the index that was in effect before this object was created.
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
  SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  // Tag type used only to select the exception-specification overload below.
  struct ExceptionSpecification {};

  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template, NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  // Tag type used only to select the constraints-check overloads below.
  struct ConstraintsCheck {};

  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  // Tag type used only to select the constraint-substitution overload below.
  struct ConstraintSubstitution {};

  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  // Tag type used only to select the constraint-normalization overload below.
  struct ConstraintNormalization {};

  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  // Tag type used only to select the parameter-mapping overload below.
  struct ParameterMappingSubstitution {};

  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Common implementation used by the public constructors above.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: the destructor pops exactly one context off the stack.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

/// Print the current code-synthesis context stack, but only if it has
/// changed since the last time it was emitted (see
/// \c LastEmittedCodeSynthesisContextDepth).
void PrintContextStack() {
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    // If we are not already in a SFINAE context, mark that we entered a
    // non-instantiation one so substitution failures are still suppressed.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore all of the Sema state captured at construction time.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

/// RAII scope that saves the sets of pending instantiations and vtable
/// uses on entry and restores (or merges) them on exit; \c perform()
/// flushes the instantiations gathered inside the scope.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that saves the pending *local* implicit instantiations on
/// entry and restores them on exit; \c perform() flushes the local
/// instantiations gathered inside the scope.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in ascending order; skipped indices are filled
  /// with default-constructed infos (enforced by the assert/resize below).
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    // If every recorded info was the default, no array is needed.
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc,
                          DeclarationName Entity,
                          CXXRecordDecl *ThisContext,
                          Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          int indexAdjustment,
                          Optional<unsigned> NumExpansions,
                          bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);

StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            TemplateArgumentListInfo &Outputs);

Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
                                         FunctionDecl *Spaceship);

ExprResult SubstInitializer(Expr *E,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            bool CXXDirectInit);

bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                         CXXRecordDecl *Pattern,
                         const MultiLevelTemplateArgumentList &TemplateArgs);

bool InstantiateClass(SourceLocation PointOfInstantiation,
                      CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                      const MultiLevelTemplateArgumentList &TemplateArgs,
                      TemplateSpecializationKind TSK,
                      bool Complain = true);

bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);

bool InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

/// An attribute whose instantiation must be deferred until the enclosing
/// declaration has been fully instantiated.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = nullptr,
                      LocalInstantiationScope *OuterMostScope = nullptr);

void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);

bool usesPartialOrExplicitSpecialization(
    SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);

bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                           ClassTemplateSpecializationDecl *ClassTemplateSpec,
                           TemplateSpecializationKind TSK,
                           bool Complain = true);

void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                             CXXRecordDecl *Instantiation,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             TemplateSpecializationKind TSK);

void InstantiateClassTemplateSpecializationMembers(
    SourceLocation PointOfInstantiation,
    ClassTemplateSpecializationDecl *ClassTemplateSpec,
    TemplateSpecializationKind TSK);

NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                            const MultiLevelTemplateArgumentList &TemplateArgs);

DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                         const MultiLevelTemplateArgumentList &TemplateArgs);

TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                  SourceLocation Loc,
                  const MultiLevelTemplateArgumentList &TemplateArgs);

bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
           TemplateArgumentListInfo &Result,
           const MultiLevelTemplateArgumentList &TemplateArgs);

bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
                                ParmVarDecl *Param);

void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);

bool CheckInstantiatedFunctionTemplateConstraints(
    SourceLocation PointOfInstantiation, FunctionDecl *Decl,
    ArrayRef<TemplateArgument> TemplateArgs,
    ConstraintSatisfaction &Satisfaction);

FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
                                             const TemplateArgumentList *Args,
                                             SourceLocation Loc);

void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);

VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
    VarTemplateDecl *VarTemplate, VarDecl *FromVar,
    const TemplateArgumentList &TemplateArgList,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    SmallVectorImpl<TemplateArgument> &Converted,
    SourceLocation PointOfInstantiation, void *InsertPos,
    LateInstantiatedAttrVec *LateAttrs = nullptr,
    LocalInstantiationScope *StartingScope = nullptr);

VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                           LateInstantiatedAttrVec *LateAttrs,
                           DeclContext *Owner,
                           LocalInstantiationScope *StartingScope,
                           bool InstantiatingVarTemplate = false,
                           VarTemplateSpecializationDecl *PrevVTSD = nullptr);

VarDecl *getVarTemplateSpecialization(
    VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
    const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);

void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);

void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                                const MultiLevelTemplateArgumentList &TemplateArgs);

NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          bool FindingInstantiatedContext = false);

DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                          const MultiLevelTemplateArgumentList &TemplateArgs);

// Objective-C declarations.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

DeclResult actOnObjCTypeParam(Scope *S,
                              ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc,
                              unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc,
                              ParsedType typeBound);

ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

void ActOnSuperClassOfClassInterface(Scope *S,
                                     SourceLocation AtInterfaceLoc,
                                     ObjCInterfaceDecl *IDecl,
                                     IdentifierInfo *ClassName,
                                     SourceLocation ClassLoc,
                                     IdentifierInfo *SuperName,
                                     SourceLocation SuperLoc,
                                     ArrayRef<ParsedType> SuperTypeArgs,
                                     SourceRange SuperTypeArgsRange);

void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

Decl *ActOnCompatibilityAlias(
    SourceLocation AtCompatibilityAliasLoc,
    IdentifierInfo *AliasName, SourceLocation AliasLocation,
    IdentifierInfo *ClassName, SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList,
    SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
    unsigned NumElts);

DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
    Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
    ArrayRef<IdentifierInfo *> identifiers,
    ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
    SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
    bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
    SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
    ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
    Scope *S, SourceLocation Loc, ParsedType BaseType,
    SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
    SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
    ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
    SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                SourceLocation ProtocolLAngleLoc,
                                ArrayRef<ObjCProtocolDecl *> Protocols,
                                ArrayRef<SourceLocation> ProtocolLocs,
                                SourceLocation ProtocolRAngleLoc,
                                bool FailOnError = false);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Ensure attributes are consistent with type.
///
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                 SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);

void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name,
                              bool OverridingProtocolProperty);

void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 ArrayRef<Decl *> allMethods = None,
                 ArrayRef<DeclGroupPtrTy> allTUVars = None);

Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                    SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = nullptr);

Decl *ActOnPropertyImplDecl(Scope *S,
                            SourceLocation AtLoc,
                            SourceLocation PropertyLoc,
                            bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc,
                            ObjCPropertyQueryKind QueryKind);

enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

/// Parser-provided information about a single Objective-C method argument.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};

Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
    unsigned CNumArgs, // c-style args
    const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);

void deduceOpenCLAddressSpace(ValueDecl *decl);

ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                                     Expr *BaseExpr,
                                     SourceLocation OpLoc,
                                     DeclarationName MemberName,
                                     SourceLocation MemberLoc,
                                     SourceLocation SuperLoc,
                                     QualType SuperType,
                                     bool Super);

ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                                     IdentifierInfo &propertyName,
                                     SourceLocation receiverNameLoc,
                                     SourceLocation propertyNameLoc);

ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

ObjCMessageKind getObjCMessageKind(Scope *S,
                                   IdentifierInfo *Name,
                                   SourceLocation NameLoc,
                                   bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType,
                             SourceLocation SuperLoc,
                             Selector Sel,
                             ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args,
                             bool isImplicit = false);

ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver,
                                     SourceLocation Loc,
                                     Selector Sel,
                                     ObjCMethodDecl *Method,
                                     MultiExprArg Args);

ExprResult ActOnClassMessage(Scope *S,
                             ParsedType Receiver,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

ExprResult BuildInstanceMessage(Expr *Receiver,
                                QualType ReceiverType,
                                SourceLocation SuperLoc,
                                Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args,
                                bool isImplicit = false);

ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                        QualType ReceiverType,
                                        SourceLocation Loc,
                                        Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

ExprResult ActOnInstanceMessage(Scope *S,
                                Expr *Receiver,
                                Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args);

ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo,
                                Expr *SubExpr);

ExprResult ActOnObjCBridgedCast(Scope *S,
                                SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type,
                                SourceLocation RParenLoc,
                                Expr *SubExpr);

void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). 
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                           SourceLocation PragmaLoc, MSVtorDispMode Value);

/// Which MS segment pragma a section name was registered under.
enum PragmaSectionKind {
  PSK_DataSeg,  // #pragma data_seg
  PSK_BSSSeg,   // #pragma bss_seg
  PSK_ConstSeg, // #pragma const_seg
  PSK_CodeSeg,  // #pragma code_seg
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName, llvm::StringRef PragmaName);

/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                               StringRef Value);

/// Are precise floating point semantics currently enabled?
/// Precise means every fast-math relaxation tracked in CurFPFeatures
/// (reassociation, no-signed-zeros, reciprocal approximation, approximate
/// library functions) is currently off.
bool isPreciseFPEnabled() {
  return !CurFPFeatures.getAllowFPReassociate() &&
         !CurFPFeatures.getNoSignedZero() &&
         !CurFPFeatures.getAllowReciprocal() &&
         !CurFPFeatures.getAllowApproxFunc();
}

/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
                             PragmaFloatControlKind Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called to set rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. 
void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". 
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                    bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
                    bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                          Expr *OE);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                       Expr *ParamExpr);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *MaxThreads, Expr *MinBlocks);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. 
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);

//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
  // Name of the extension currently being parsed, if any ("" when none).
  std::string CurrOpenCLExtension;
  /// Extensions required by an OpenCL type.
  llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
  /// Extensions required by an OpenCL declaration.
  llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
  // Read accessor for CurrOpenCLExtension.
  llvm::StringRef getCurrentOpenCLExtension() const {
    return CurrOpenCLExtension;
  }

  /// Check if a function declaration \p FD associates with any
  /// extensions present in OpenCLDeclExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

  /// Check if a function type \p FT associates with any
  /// extensions present in OpenCLTypeExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

  /// Find an extension in an appropriate extension map and return its name
  template<typename T, typename MapT>
  std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

  // Write accessor for CurrOpenCLExtension.
  void setCurrentOpenCLExtension(llvm::StringRef Ext) {
    CurrOpenCLExtension = std::string(Ext);
  }

  /// Set OpenCL extensions for a type which can only be used when these
  /// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

  /// Set OpenCL extensions for a declaration which can only be
  /// used when these OpenCL extensions are enabled. If \p Exts is empty, do
  /// nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

  /// Set current OpenCL extensions for a type which can only be used
  /// when these OpenCL extensions are enabled. If current OpenCL extension is
  /// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Returns the number of scopes associated with the construct on the given /// OpenMP level. int getNumberOfConstructScopes(unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. 
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Helper to keep information about the current `omp begin/end declare /// variant` nesting. struct OMPDeclareVariantScope { /// The associated OpenMP context selector. OMPTraitInfo *TI; /// The associated OpenMP context selector mangling. std::string NameSuffix; OMPDeclareVariantScope(OMPTraitInfo &TI); }; /// The current `omp begin/end declare variant` scopes. SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes; /// The declarator \p D defines a function in the scope \p S which is nested /// in an `omp begin/end declare variant` scope. In this method we create a /// declaration for \p D and rename \p D according to the OpenMP context /// selector of the surrounding scope. FunctionDecl * ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S, Declarator &D); /// Register \p FD as specialization of \p BaseFD in the current `omp /// begin/end declare variant` scope. void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope( FunctionDecl *FD, FunctionDecl *BaseFD); public: /// Can we exit a scope at the moment. 
// True while inside at least one `omp begin/end declare variant` scope
// (i.e. the OMPDeclareVariantScopes stack is non-empty).
bool isInOpenMPDeclareVariantScope() {
  return !OMPDeclareVariantScopes.empty();
}

/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
                           SourceLocation LParenLoc, MultiExprArg ArgExprs,
                           SourceLocation RParenLoc, Expr *ExecConfig);

/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);

/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();

/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
    const FunctionDecl *OldFD, const FunctionDecl *NewFD,
    const PartialDiagnostic &NoProtoDiagID,
    const PartialDiagnosticAt &NoteCausedDiagIDAt,
    const PartialDiagnosticAt &NoSupportDiagIDAt,
    const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
    bool ConstexprSupported, bool CLinkageMayDiffer);

/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);

/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                           unsigned OpenMPCaptureLevel) const;

/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// If the current region is a range loop-based region, mark the start of the /// loop construct. void startOpenMPCXXRangeFor(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level, unsigned CapLevel) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; /// Check if the specified global variable must be captured by outer capture /// regions. /// \param Level Relative level of nested OpenMP construct for that /// the check is performed. bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level, unsigned CaptureLevel) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. 
void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. 
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragme omp end declare target'. 
void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller, const FunctionDecl *Callee, SourceLocation Loc); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. 
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. 
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp depobj'. StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp scan'. StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp master taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel master taskloop simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type, bool IsDeclareSimd = false); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which declare variant directive is /// applied to. 
/// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The trait info object representing the match clause. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. 
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                         SourceLocation LParenLoc = SourceLocation(),
                         Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

/// Called on a well-formed clause whose argument is a single simple
/// (enumeration-like) value at \p ArgumentLoc; the concrete clause to build
/// is selected by \p Kind.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                   SourceLocation ArgumentLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
                                     SourceLocation KindLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
                                  SourceLocation KindLoc,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
                                   SourceLocation KindLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

/// Called on a well-formed clause that carries both simple argument(s)
/// (\p Arguments) and an expression (\p Expr); the concrete clause to build
/// is selected by \p Kind.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
    OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

/// Called on a well-formed clause that takes no argument; the concrete
/// clause to build is selected by \p Kind.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                             SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
    OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);

/// Called on a well-formed clause that carries a variable list; the concrete
/// clause to build is selected by \p Kind, with the remaining parameters
/// supplying the kind-specific pieces (tail/modifier expression, mapper or
/// reduction identifier, map-type modifiers, etc.) that only some clause
/// kinds use.
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
    SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation ColonLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. 
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. 
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. 
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

/// Data for list of allocators.
struct UsesAllocatorsData {
  /// Allocator.
  Expr *Allocator = nullptr;
  /// Allocator traits.
  Expr *AllocatorTraits = nullptr;
  /// Locations of '(' and ')' symbols.
  SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc,
                                          ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ColonLoc,
                                     SourceLocation EndLoc, Expr *Modifier,
                                     ArrayRef<Expr *> Locators);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true if \p CCK names one of the explicit cast kinds (C-style,
/// functional, or other cast), false for the non-cast conversion kinds.
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. 
ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. 
ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. 
IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. 
This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. 
/// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. 
enum ReferenceConversions {
  Qualification = 0x1,
  NestedQualification = 0x2,
  Function = 0x4,
  DerivedToBase = 0x8,
  ObjC = 0x10,
  ObjCLifetime = 0x20,

  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};

using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;

ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                             ReferenceConversions *Conv = nullptr);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. 
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  bool HasKnownValue;
  bool KnownValue;

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);

ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. 
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). 
K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. 
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. 
/// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); /// Check if the expression is allowed to be used in expressions for the /// offloading devices. void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// A CUDA lambda is by default a host device function unless it has an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. 
void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. 
PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. 
\p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const 
Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); 
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a 
completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. 
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Nullable_result = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. 
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. 
/// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. 
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
AsynchronousUpdate.c
#include "AsynchronousUpdate.h"
#include <stdio.h>
#include <stdlib.h>
#include "constants.h"
#include <gsl/gsl_rng.h>
#include <omp.h>

/* Draw a uniformly distributed value in {0, 1} from the given GSL generator. */
int getBool(gsl_rng* r)
{
    return gsl_rng_uniform_int(r, 2);
}

/*
 * Perform ITER_ASYNCHRONOUS asynchronous Boolean-network updates on `state`.
 *
 * At each iteration one node is picked uniformly at random; if it is not
 * clamped (fixed_nodes[node] == NORMAL_FLAG), its new value is the sign of the
 * weighted input sum over all nodes: 1 for a positive sum, LOWER_EXPRESSION
 * for a negative sum, unchanged for a zero sum.
 *
 * n           number of nodes (state and topology rows/cols have length n)
 * state       in/out node states, updated in place
 * topology    n x n interaction matrix (topology[i][j] weights node j's input to i)
 * seed        seed for the private RNG used to pick update targets
 * fixed_nodes per-node clamp flags; NORMAL_FLAG marks a free node
 */
void singleStateUpdate(int n, int* state, int** topology, int seed, int* fixed_nodes)
{
    gsl_rng* asyncer = gsl_rng_alloc(gsl_rng_ranlxs2);
    gsl_rng_set(asyncer, seed);
    for (int i = 0; i < ITER_ASYNCHRONOUS; ++i) {
        int node_to_update = gsl_rng_uniform_int(asyncer, n);
        if (fixed_nodes[node_to_update] == NORMAL_FLAG) {
            int val_temp = 0;
            for (int j = 0; j < n; ++j) {
                val_temp += state[j] * topology[node_to_update][j];
            }
            if (val_temp > 0) {
                state[node_to_update] = 1;
            } else if (val_temp < 0) {
                state[node_to_update] = LOWER_EXPRESSION;
            }
            /* val_temp == 0 leaves the node unchanged (intentional). */
        }
    }
    gsl_rng_free(asyncer);
}

/*
 * Fill `state` with a random initial configuration (clamped nodes take their
 * fixed value) and relax it with singleStateUpdate using seed i+1.
 *
 * `rangen` is shared across the OpenMP threads of the initialization loop;
 * the critical(rand) section serializes access to it, since GSL generators
 * are not thread-safe.
 */
void getFinalState(int n, int state[n], gsl_rng* rangen, int** topology, int* fixed_nodes, int i)
{
#pragma omp parallel for
    for (int j = 0; j < n; ++j) {
        if (fixed_nodes[j] == NORMAL_FLAG) {
#pragma omp critical(rand)
            {
                state[j] = getBool(rangen);
            }
            /* Map the 0 draw onto the project's "low expression" value. */
            if (state[j] == 0) {
                state[j] = LOWER_EXPRESSION;
            }
        } else {
            state[j] = fixed_nodes[j];
        }
    }
    singleStateUpdate(n, state, topology, i + 1, fixed_nodes);
}

/*
 * Spawn the two child recursions of updateAsyncList in parallel and merge
 * their result lists. `rng_type` selects the generator family used to derive
 * the two child seeds (the caller alternates families per tree level).
 */
static asynclist* branchAndMerge(int n, int** topology, int* fixed_nodes,
                                 int height, int seed_init,
                                 const gsl_rng_type* rng_type)
{
    gsl_rng* rangen = gsl_rng_alloc(rng_type);
    gsl_rng_set(rangen, seed_init);
    /* Fix: draw the two seeds with sequenced statements. The original used an
     * initializer list, whose side-effect order is unspecified in C, so the
     * two seeds could be produced in either order. */
    int seed[2];
    seed[0] = (int)gsl_rng_get(rangen);
    seed[1] = (int)gsl_rng_get(rangen);
    /* Fix: the generator was previously leaked on every recursion node. */
    gsl_rng_free(rangen);

    asynclist* to_merge[2];
    int i;
#pragma omp parallel for shared(n, topology, fixed_nodes, seed, height) private(i)
    for (i = 0; i < 2; ++i) {
        to_merge[i] = updateAsyncList(n, topology, fixed_nodes, height - 1, seed[i]);
    }
    return merge_asynclist(to_merge[0], to_merge[1], n);
}

/*
 * Recursively build a list of relaxed network states.
 *
 * For height > 0 the call splits into two children with derived seeds (RNG
 * family alternating between ranlxs2 on odd levels and taus2 on even levels,
 * as in the original code) and merges their lists. At height == 0 a single
 * random initial state is drawn, relaxed with singleStateUpdate, and returned
 * as a one-element list. Caller owns the returned list.
 *
 * Exits with code 9/10/11 on allocation failure, matching the file's existing
 * error-handling style.
 */
asynclist* updateAsyncList(int n, int** topology, int* fixed_nodes, int height, int seed_init)
{
    if (height) {
        /* Both branches of the original were identical except for the RNG
         * family; the duplication is folded into branchAndMerge. */
        const gsl_rng_type* rng_type = (height % 2) ? gsl_rng_ranlxs2 : gsl_rng_taus2;
        return branchAndMerge(n, topology, fixed_nodes, height, seed_init, rng_type);
    }

    /* Base case: one random state, relaxed, wrapped in a fresh list. */
    asynclist* list = (asynclist*)malloc(sizeof(asynclist));
    if (!list) {
        /* Fix: this allocation was the only unchecked one in the function. */
        exit(9);
    }
    init_asynclist(list);

    gsl_rng* rangen = gsl_rng_alloc(gsl_rng_ranlxs2);
    gsl_rng_set(rangen, seed_init);
    int state[n];
    for (int i = 0; i < n; ++i) {
        if (fixed_nodes[i] == NORMAL_FLAG) {
            state[i] = gsl_rng_uniform_int(rangen, 2);
            if (state[i] == 0) {
                state[i] = LOWER_EXPRESSION;
            }
        } else {
            state[i] = fixed_nodes[i];
        }
    }
    int update_seed = (int)gsl_rng_get(rangen);
    /* Fix: the generator was previously leaked in the base case as well. */
    gsl_rng_free(rangen);
    singleStateUpdate(n, state, topology, update_seed, fixed_nodes);

    list->n_elements = 1;
    asynclistnode* node = (asynclistnode*)malloc(sizeof(asynclistnode));
    if (!node) {
        exit(10);
    }
    node->n_occurances = 1;
    node->state = (int*)malloc(n * sizeof(int));
    if (!(node->state)) {
        exit(11);
    }
    for (int i = 0; i < n; ++i) {
        node->state[i] = state[i];
    }
    node->next = NULL;
    node->prev = NULL;
    list->next = node;
    return list;
}

/*
 * Sample 2^N_SAMPLES relaxed states in parallel and insert each into the
 * stable-state tree. One shared generator (seeded with 0) feeds all threads;
 * getFinalState serializes access to it internally, and tree insertion is
 * serialized with critical(addToTree).
 */
void updateAsyncTree(int n, int** topology, int* fixed_nodes, base* stable)
{
    int iter = (1 << N_SAMPLES);
    gsl_rng* rangen = gsl_rng_alloc(gsl_rng_ranlxs2);
    gsl_rng_set(rangen, 0);
#pragma omp parallel for
    for (int i = 0; i < iter; ++i) {
        int state[n];
        getFinalState(n, state, rangen, topology, fixed_nodes, i);
#pragma omp critical(addToTree)
        {
            add_node(stable, n, state);
        }
    }
    gsl_rng_free(rangen);
}
dct2_fft2.h
/**
 * @file dct2_fft2.h
 * @author Zixuan Jiang, Jiaqi Gu
 * @date Aug 2019
 * @brief All the transforms in this file are implemented based on 2D FFT.
 * Each transform has three steps, 1) preprocess, 2) 2d fft or 2d ifft, 3)
 * postprocess.
 */

#ifndef DREAMPLACE_DCT2_FFT2_H
#define DREAMPLACE_DCT2_FFT2_H

#include <float.h>
#include <math.h>

#include "utility/src/torch.h"
#include "utility/src/utils.h"

DREAMPLACE_BEGIN_NAMESPACE

// Forward 2D DCT of x (using a 2D FFT internally); result in out, scratch in buf.
void dct2_fft2_forward(at::Tensor x, at::Tensor expkM, at::Tensor expkN,
                       at::Tensor out, at::Tensor buf, int num_threads);

// Inverse 2D DCT of x (using a 2D IFFT internally); result in out, scratch in buf.
void idct2_fft2_forward(at::Tensor x, at::Tensor expkM, at::Tensor expkN,
                        at::Tensor out, at::Tensor buf, int num_threads);

// IDCT along rows combined with IDXST along columns; result in out, scratch in buf.
void idct_idxst_forward(at::Tensor x, at::Tensor expkM, at::Tensor expkN,
                        at::Tensor out, at::Tensor buf, int num_threads);

// IDXST along rows combined with IDCT along columns; result in out, scratch in buf.
void idxst_idct_forward(at::Tensor x, at::Tensor expkM, at::Tensor expkN,
                        at::Tensor out, at::Tensor buf, int num_threads);

// Flatten a (row, col) pair into a linear index for a row-major matrix of width N.
inline int INDEX(const int hid, const int wid, const int N) {
  return (hid * N + wid);
}

/**
 * DCT preprocessing: permute the M x N input x into y so that the subsequent
 * 2D FFT yields the DCT-II coefficients after postprocessing.
 *
 * The destination index depends on the parity of (hid, wid): even columns map
 * to the left half wid/2, odd columns mirror to N-(wid+1)/2, and odd rows
 * mirror to row 2*M-(hid+1). Given those row/column ranges, y is addressed as
 * a (2*M) x (N/2) row-major array (same M*N element count as x).
 *
 * @param x           input, M x N row-major
 * @param y           output, permuted copy of x laid out as (2*M) x (N/2)
 * @param M           number of rows of x
 * @param N           number of columns of x (assumed even — TODO confirm with callers)
 * @param num_threads OpenMP thread count
 */
template <typename T>
void dct2dPreprocessCpu(const T* x, T* y, const int M, const int N,
                        int num_threads) {
  int halfN = N / 2;
#pragma omp parallel for num_threads(num_threads)
  for (int hid = 0; hid < M; ++hid) {
    for (int wid = 0; wid < N; ++wid) {
      int index;
      // Bit 1: row is even; bit 0: column is even.
      int cond = (((hid & 1) == 0) << 1) | ((wid & 1) == 0);
      switch (cond) {
        case 0:  // odd row, odd column: mirror both axes
          index = INDEX(2 * M - (hid + 1), N - (wid + 1) / 2, halfN);
          break;
        case 1:  // odd row, even column: mirror row only
          index = INDEX(2 * M - (hid + 1), wid / 2, halfN);
          break;
        case 2:  // even row, odd column: mirror column only
          index = INDEX(hid, N - (wid + 1) / 2, halfN);
          break;
        case 3:  // even row, even column: direct placement
          index = INDEX(hid, wid / 2, halfN);
          break;
        default:
          break;  // unreachable: cond is always in [0, 3]
      }
      y[index] = x[INDEX(hid, wid, N)];
    }
  }
}

// Thin launcher wrapper kept for symmetry with the GPU-side API naming.
template <typename T>
void dct2dPreprocessCpuLauncher(const T* x, T* y, const int M, const int N,
                                int num_threads) {
  dct2dPreprocessCpu<T>(x, y, M, N, num_threads);
}

/**
 * DCT postprocessing: combine the complex FFT output V with the twiddle
 * factors expkM/expkN to produce the real M x N DCT-II result y.
 * (Body defined below.)
 */
template <typename T, typename TComplex>
void dct2dPostprocessCpu(const TComplex* V, T* y, const int M, const int N,
                         const TComplex* expkM, const TComplex* expkN,
                         int num_threads)
{ int halfM = M / 2; int halfN = N / 2; T four_over_MN = (T)(4. / (M * N)); T two_over_MN = (T)(2. / (M * N)); #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < halfM; ++hid) { for (int wid = 0; wid < halfN; ++wid) { int cond = ((hid != 0) << 1) | (wid != 0); switch (cond) { case 0: { y[0] = V[0].x * four_over_MN; y[halfN] = RealPartOfMul(expkN[halfN], V[halfN]) * four_over_MN; y[INDEX(halfM, 0, N)] = expkM[halfM].x * V[INDEX(halfM, 0, halfN + 1)].x * four_over_MN; y[INDEX(halfM, halfN, N)] = expkM[halfM].x * RealPartOfMul(expkN[halfN], V[INDEX(halfM, halfN, halfN + 1)]) * four_over_MN; break; } case 1: { ComplexType<T> tmp; tmp = V[wid]; y[wid] = RealPartOfMul(expkN[wid], tmp) * four_over_MN; y[N - wid] = -ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN; tmp = V[INDEX(halfM, wid, halfN + 1)]; y[INDEX(halfM, wid, N)] = expkM[halfM].x * RealPartOfMul(expkN[wid], tmp) * four_over_MN; y[INDEX(halfM, N - wid, N)] = -expkM[halfM].x * ImaginaryPartOfMul(expkN[wid], tmp) * four_over_MN; break; } case 2: { ComplexType<T> tmp1, tmp2, tmp_up, tmp_down; tmp1 = V[INDEX(hid, 0, halfN + 1)]; tmp2 = V[INDEX(M - hid, 0, halfN + 1)]; tmp_up.x = expkM[hid].x * (tmp1.x + tmp2.x) + expkM[hid].y * (tmp2.y - tmp1.y); tmp_down.x = -expkM[hid].y * (tmp1.x + tmp2.x) + expkM[hid].x * (tmp2.y - tmp1.y); y[INDEX(hid, 0, N)] = tmp_up.x * two_over_MN; y[INDEX(M - hid, 0, N)] = tmp_down.x * two_over_MN; tmp1 = complexAdd(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]); tmp2 = complexSubtract(V[INDEX(hid, halfN, halfN + 1)], V[INDEX(M - hid, halfN, halfN + 1)]); tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y; tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x; tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y; tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x; y[INDEX(hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_up) * two_over_MN; y[INDEX(M - hid, halfN, N)] = RealPartOfMul(expkN[halfN], tmp_down) * 
two_over_MN; break; } case 3: { ComplexType<T> tmp1, tmp2, tmp_up, tmp_down; tmp1 = complexAdd(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]); tmp2 = complexSubtract(V[INDEX(hid, wid, halfN + 1)], V[INDEX(M - hid, wid, halfN + 1)]); tmp_up.x = expkM[hid].x * tmp1.x - expkM[hid].y * tmp2.y; tmp_up.y = expkM[hid].x * tmp1.y + expkM[hid].y * tmp2.x; tmp_down.x = -expkM[hid].y * tmp1.x - expkM[hid].x * tmp2.y; tmp_down.y = -expkM[hid].y * tmp1.y + expkM[hid].x * tmp2.x; y[INDEX(hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_up) * two_over_MN; y[INDEX(M - hid, wid, N)] = RealPartOfMul(expkN[wid], tmp_down) * two_over_MN; y[INDEX(hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_up) * two_over_MN; y[INDEX(M - hid, N - wid, N)] = -ImaginaryPartOfMul(expkN[wid], tmp_down) * two_over_MN; break; } default: assert(0); break; } } } } template <typename T> void dct2dPostprocessCpuLauncher(const T* x, T* y, const int M, const int N, const T* expkM, const T* expkN, int num_threads) { dct2dPostprocessCpu<T, ComplexType<T>>((ComplexType<T>*)x, y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads); } template <typename T, typename TComplex> void idct2_fft2PreprocessCpu(const T* input, TComplex* output, const int M, const int N, const TComplex* expkM, const TComplex* expkN, int num_threads) { const int halfM = M / 2; const int halfN = N / 2; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < halfM; ++hid) { for (int wid = 0; wid < halfN; ++wid) { int cond = ((hid != 0) << 1) | (wid != 0); switch (cond) { case 0: { T tmp1; TComplex tmp_up; output[0].x = input[0]; output[0].y = 0; tmp1 = input[halfN]; tmp_up.x = tmp1; tmp_up.y = tmp1; output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up)); tmp1 = input[INDEX(halfM, 0, N)]; tmp_up.x = tmp1; tmp_up.y = tmp1; output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up)); tmp1 = input[INDEX(halfM, halfN, N)]; tmp_up.x = 0; tmp_up.y = 2 * tmp1; 
output[INDEX(halfM, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up)); break; } case 1: { TComplex tmp_up; tmp_up.x = input[wid]; tmp_up.y = input[N - wid]; output[wid] = complexConj(complexMul(expkN[wid], tmp_up)); T tmp1 = input[INDEX(halfM, wid, N)]; T tmp2 = input[INDEX(halfM, N - wid, N)]; tmp_up.x = tmp1 - tmp2; tmp_up.y = tmp1 + tmp2; output[INDEX(halfM, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up)); break; } case 2: { T tmp1, tmp3; TComplex tmp_up, tmp_down; tmp1 = input[INDEX(hid, 0, N)]; tmp3 = input[INDEX(M - hid, 0, N)]; tmp_up.x = tmp1; tmp_up.y = tmp3; tmp_down.x = tmp3; tmp_down.y = tmp1; output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up)); output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down)); tmp1 = input[INDEX(hid, halfN, N)]; tmp3 = input[INDEX(M - hid, halfN, N)]; tmp_up.x = tmp1 - tmp3; tmp_up.y = tmp3 + tmp1; tmp_down.x = tmp3 - tmp1; tmp_down.y = tmp1 + tmp3; output[INDEX(hid, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up)); output[INDEX(M - hid, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down)); break; } case 3: { T tmp1 = input[INDEX(hid, wid, N)]; T tmp2 = input[INDEX(hid, N - wid, N)]; T tmp3 = input[INDEX(M - hid, wid, N)]; T tmp4 = input[INDEX(M - hid, N - wid, N)]; TComplex tmp_up, tmp_down; tmp_up.x = tmp1 - tmp4; tmp_up.y = tmp3 + tmp2; tmp_down.x = tmp3 - tmp2; tmp_down.y = tmp1 + tmp4; output[INDEX(hid, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up)); output[INDEX(M - hid, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down)); break; } default: assert(0); break; } } } } template <typename T> void idct2_fft2PreprocessCpuLauncher(const T* x, T* y, const int M, const int N, const T* expkM, const T* expkN, int 
num_threads) { idct2_fft2PreprocessCpu<T, ComplexType<T>>( x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads); } template <typename T> void idct2_fft2PostprocessCpu(const T* x, T* y, const int M, const int N, int num_threads) { int MN = M * N; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < M; ++hid) { for (int wid = 0; wid < N; ++wid) { int cond = ((hid < M / 2) << 1) | (wid < N / 2); int index; switch (cond) { case 0: index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N); break; case 1: index = INDEX(((M - hid) << 1) - 1, wid << 1, N); break; case 2: index = INDEX(hid << 1, ((N - wid) << 1) - 1, N); break; case 3: index = INDEX(hid << 1, wid << 1, N); break; default: assert(0); break; } y[index] = x[INDEX(hid, wid, N)] * MN; } } } template <typename T> void idct2_fft2PostprocessCpuLauncher(const T* x, T* y, const int M, const int N, int num_threads) { idct2_fft2PostprocessCpu<T>(x, y, M, N, num_threads); } template <typename T, typename TComplex> void idct_idxstPreprocessCpu(const T* input, TComplex* output, const int M, const int N, const TComplex* expkM, const TComplex* expkN, int num_threads) { int halfM = M / 2; int halfN = N / 2; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < halfM; ++hid) { for (int wid = 0; wid < halfN; ++wid) { int cond = ((hid != 0) << 1) | (wid != 0); switch (cond) { case 0: { T tmp1; TComplex tmp_up; output[0].x = 0; output[0].y = 0; tmp1 = input[halfN]; tmp_up.x = tmp1; tmp_up.y = tmp1; output[halfN] = complexConj(complexMul(expkN[halfN], tmp_up)); output[INDEX(halfM, 0, halfN + 1)].x = 0; output[INDEX(halfM, 0, halfN + 1)].y = 0; tmp1 = input[INDEX(halfM, halfN, N)]; tmp_up.x = 0; tmp_up.y = 2 * tmp1; output[INDEX(halfM, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up)); break; } case 1: { TComplex tmp_up; tmp_up.x = input[N - wid]; tmp_up.y = input[wid]; output[wid] = 
complexConj(complexMul(expkN[wid], tmp_up)); T tmp1 = input[INDEX(halfM, N - wid, N)]; T tmp2 = input[INDEX(halfM, wid, N)]; tmp_up.x = tmp1 - tmp2; tmp_up.y = tmp1 + tmp2; output[INDEX(halfM, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up)); break; } case 2: { T tmp1, tmp3; TComplex tmp_up, tmp_down; output[INDEX(hid, 0, halfN + 1)].x = 0; output[INDEX(hid, 0, halfN + 1)].y = 0; output[INDEX(M - hid, 0, halfN + 1)].x = 0; output[INDEX(M - hid, 0, halfN + 1)].y = 0; tmp1 = input[INDEX(hid, halfN, N)]; tmp3 = input[INDEX(M - hid, halfN, N)]; tmp_up.x = tmp1 - tmp3; tmp_up.y = tmp3 + tmp1; tmp_down.x = tmp3 - tmp1; tmp_down.y = tmp1 + tmp3; output[INDEX(hid, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up)); output[INDEX(M - hid, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down)); break; } case 3: { T tmp1 = input[INDEX(hid, N - wid, N)]; T tmp2 = input[INDEX(hid, wid, N)]; T tmp3 = input[INDEX(M - hid, N - wid, N)]; T tmp4 = input[INDEX(M - hid, wid, N)]; TComplex tmp_up, tmp_down; tmp_up.x = tmp1 - tmp4; tmp_up.y = tmp3 + tmp2; tmp_down.x = tmp3 - tmp2; tmp_down.y = tmp1 + tmp4; output[INDEX(hid, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up)); output[INDEX(M - hid, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down)); break; } default: assert(0); break; } } } } template <typename T> void idct_idxstPreprocessCpuLauncher(const T* x, T* y, const int M, const int N, const T* expkM, const T* expkN, int num_threads) { idct_idxstPreprocessCpu<T, ComplexType<T>>( x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads); } template <typename T> void idct_idxstPostprocessCpu(const T* x, T* y, const int M, const int N, int num_threads) { // const int halfN = N / 2; const int MN = M * N; #pragma omp parallel for num_threads(num_threads) 
for (int hid = 0; hid < M; ++hid) { for (int wid = 0; wid < N; ++wid) { int cond = ((hid < M / 2) << 1) | (wid < N / 2); int index; switch (cond) { case 0: index = INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N); y[index] = -x[INDEX(hid, wid, N)] * MN; break; case 1: index = INDEX(((M - hid) << 1) - 1, wid << 1, N); y[index] = x[INDEX(hid, wid, N)] * MN; break; case 2: index = INDEX(hid << 1, ((N - wid) << 1) - 1, N); y[index] = -x[INDEX(hid, wid, N)] * MN; break; case 3: index = INDEX(hid << 1, wid << 1, N); y[index] = x[INDEX(hid, wid, N)] * MN; break; default: assert(0); break; } } } } template <typename T> void idct_idxstPostprocessCpuLauncher(const T* x, T* y, const int M, const int N, int num_threads) { idct_idxstPostprocessCpu<T>(x, y, M, N, num_threads); } template <typename T, typename TComplex> void idxst_idctPreprocessCpu(const T* input, TComplex* output, const int M, const int N, const TComplex* expkM, const TComplex* expkN, int num_threads) { const int halfM = M / 2; const int halfN = N / 2; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < halfM; ++hid) { for (int wid = 0; wid < halfN; ++wid) { int cond = ((hid != 0) << 1) | (wid != 0); switch (cond) { case 0: { T tmp1; TComplex tmp_up; output[0].x = 0; output[0].y = 0; output[halfN].x = 0; output[halfN].y = 0; tmp1 = input[INDEX(halfM, 0, N)]; tmp_up.x = tmp1; tmp_up.y = tmp1; output[INDEX(halfM, 0, halfN + 1)] = complexConj(complexMul(expkM[halfM], tmp_up)); tmp1 = input[INDEX(halfM, halfN, N)]; tmp_up.x = 0; tmp_up.y = 2 * tmp1; output[INDEX(halfM, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[halfM], expkN[halfN]), tmp_up)); break; } case 1: { output[wid].x = 0; output[wid].y = 0; TComplex tmp_up; T tmp1 = input[INDEX(halfM, wid, N)]; T tmp2 = input[INDEX(halfM, N - wid, N)]; tmp_up.x = tmp1 - tmp2; tmp_up.y = tmp1 + tmp2; output[INDEX(halfM, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[halfM], expkN[wid]), tmp_up)); break; } case 2: { T 
tmp1, tmp3; TComplex tmp_up, tmp_down; tmp1 = input[INDEX(M - hid, 0, N)]; tmp3 = input[INDEX(hid, 0, N)]; tmp_up.x = tmp1; tmp_up.y = tmp3; tmp_down.x = tmp3; tmp_down.y = tmp1; output[INDEX(hid, 0, halfN + 1)] = complexConj(complexMul(expkM[hid], tmp_up)); output[INDEX(M - hid, 0, halfN + 1)] = complexConj(complexMul(expkM[M - hid], tmp_down)); tmp1 = input[INDEX(M - hid, halfN, N)]; tmp3 = input[INDEX(hid, halfN, N)]; tmp_up.x = tmp1 - tmp3; tmp_up.y = tmp3 + tmp1; tmp_down.x = tmp3 - tmp1; tmp_down.y = tmp1 + tmp3; output[INDEX(hid, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[hid], expkN[halfN]), tmp_up)); output[INDEX(M - hid, halfN, halfN + 1)] = complexConj( complexMul(complexMul(expkM[M - hid], expkN[halfN]), tmp_down)); break; } case 3: { T tmp1 = input[INDEX(M - hid, wid, N)]; T tmp2 = input[INDEX(M - hid, N - wid, N)]; T tmp3 = input[INDEX(hid, wid, N)]; T tmp4 = input[INDEX(hid, N - wid, N)]; TComplex tmp_up, tmp_down; tmp_up.x = tmp1 - tmp4; tmp_up.y = tmp3 + tmp2; tmp_down.x = tmp3 - tmp2; tmp_down.y = tmp1 + tmp4; output[INDEX(hid, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[hid], expkN[wid]), tmp_up)); output[INDEX(M - hid, wid, halfN + 1)] = complexConj( complexMul(complexMul(expkM[M - hid], expkN[wid]), tmp_down)); break; } default: assert(0); break; } } } } template <typename T> void idxst_idctPreprocessCpuLauncher(const T* x, T* y, const int M, const int N, const T* expkM, const T* expkN, int num_threads) { idxst_idctPreprocessCpu<T, ComplexType<T>>( x, (ComplexType<T>*)y, M, N, (ComplexType<T>*)expkM, (ComplexType<T>*)expkN, num_threads); } template <typename T> void idxst_idctPostprocessCpu(const T* x, T* y, const int M, const int N, int num_threads) { // const int halfN = N / 2; const int MN = M * N; #pragma omp parallel for num_threads(num_threads) for (int hid = 0; hid < M; ++hid) { for (int wid = 0; wid < N; ++wid) { int cond = ((hid < M / 2) << 1) | (wid < N / 2); int index; switch (cond) { case 0: index 
= INDEX(((M - hid) << 1) - 1, ((N - wid) << 1) - 1, N); y[index] = -x[INDEX(hid, wid, N)] * MN; break; case 1: index = INDEX(((M - hid) << 1) - 1, wid << 1, N); y[index] = -x[INDEX(hid, wid, N)] * MN; break; case 2: index = INDEX(hid << 1, ((N - wid) << 1) - 1, N); y[index] = x[INDEX(hid, wid, N)] * MN; break; case 3: index = INDEX(hid << 1, wid << 1, N); y[index] = x[INDEX(hid, wid, N)] * MN; break; default: assert(0); break; } } } } template <typename T> void idxst_idctPostprocessCpuLauncher(const T* x, T* y, const int M, const int N, int num_threads) { idxst_idctPostprocessCpu<T>(x, y, M, N, num_threads); } DREAMPLACE_END_NAMESPACE #endif
convolutiondepthwise_3x3_pack8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 3x3 convolution, stride 1, 8-channel (pack8) AVX path.
// One __m256 register holds the 8 packed channels of a single spatial
// element, so each kernel tap is a single fused multiply-add across the
// whole channel group. The inner loop is unrolled 8/4/2/1 output pixels
// wide; output stores are interleaved with the next pixel's loads to hide
// latency. Assumes bottom_blob has already been padded so that
// img0 rows are outw + 2 elements wide.
static void convdw3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int outw = top_blob.w;
    int outh = top_blob.h;

    // Depthwise: one group per input channel.
    const int group = bottom_blob.c;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // Per-group bias broadcast to all 8 packed lanes (zero if no bias).
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        // Three consecutive input rows feeding one output row.
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        // The 9 kernel taps, each 8 packed channel weights.
        __m256 _k00 = _mm256_loadu_ps(k0);
        __m256 _k01 = _mm256_loadu_ps(k0 + 8);
        __m256 _k02 = _mm256_loadu_ps(k0 + 16);
        __m256 _k10 = _mm256_loadu_ps(k0 + 24);
        __m256 _k11 = _mm256_loadu_ps(k0 + 32);
        __m256 _k12 = _mm256_loadu_ps(k0 + 40);
        __m256 _k20 = _mm256_loadu_ps(k0 + 48);
        __m256 _k21 = _mm256_loadu_ps(k0 + 56);
        __m256 _k22 = _mm256_loadu_ps(k0 + 64);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // 8 output pixels per iteration. Stride 1 lets adjacent outputs
            // reuse previously loaded input columns (_r01.._r09 etc.).
            for (; j + 7 < outw; j += 8)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                _mm256_storeu_ps(outptr0, _sum0);

                _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);

                __m256 _sum2 = _bias0;
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0 + 8, _sum1);

                _sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2);

                __m256 _sum3 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                _mm256_storeu_ps(outptr0 + 16, _sum2);

                _sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3);

                __m256 _sum4 = _bias0;
                __m256 _r06 = _mm256_loadu_ps(r0 + 48);
                __m256 _r16 = _mm256_loadu_ps(r1 + 48);
                __m256 _r26 = _mm256_loadu_ps(r2 + 48);
                _mm256_storeu_ps(outptr0 + 24, _sum3);

                _sum4 = _mm256_comp_fmadd_ps(_k00, _r04, _sum4);
                _sum4 = _mm256_comp_fmadd_ps(_k01, _r05, _sum4);
                _sum4 = _mm256_comp_fmadd_ps(_k02, _r06, _sum4);
                _sum4 = _mm256_comp_fmadd_ps(_k10, _r14, _sum4);
                _sum4 = _mm256_comp_fmadd_ps(_k11, _r15, _sum4);
                _sum4 = _mm256_comp_fmadd_ps(_k12, _r16, _sum4);
                _sum4 = _mm256_comp_fmadd_ps(_k20, _r24, _sum4);
                _sum4 = _mm256_comp_fmadd_ps(_k21, _r25, _sum4);
                _sum4 = _mm256_comp_fmadd_ps(_k22, _r26, _sum4);

                __m256 _sum5 = _bias0;
                __m256 _r07 = _mm256_loadu_ps(r0 + 56);
                __m256 _r17 = _mm256_loadu_ps(r1 + 56);
                __m256 _r27 = _mm256_loadu_ps(r2 + 56);
                _mm256_storeu_ps(outptr0 + 32, _sum4);

                _sum5 = _mm256_comp_fmadd_ps(_k00, _r05, _sum5);
                _sum5 = _mm256_comp_fmadd_ps(_k01, _r06, _sum5);
                _sum5 = _mm256_comp_fmadd_ps(_k02, _r07, _sum5);
                _sum5 = _mm256_comp_fmadd_ps(_k10, _r15, _sum5);
                _sum5 = _mm256_comp_fmadd_ps(_k11, _r16, _sum5);
                _sum5 = _mm256_comp_fmadd_ps(_k12, _r17, _sum5);
                _sum5 = _mm256_comp_fmadd_ps(_k20, _r25, _sum5);
                _sum5 = _mm256_comp_fmadd_ps(_k21, _r26, _sum5);
                _sum5 = _mm256_comp_fmadd_ps(_k22, _r27, _sum5);

                __m256 _sum6 = _bias0;
                __m256 _r08 = _mm256_loadu_ps(r0 + 64);
                __m256 _r18 = _mm256_loadu_ps(r1 + 64);
                __m256 _r28 = _mm256_loadu_ps(r2 + 64);
                _mm256_storeu_ps(outptr0 + 40, _sum5);

                _sum6 = _mm256_comp_fmadd_ps(_k00, _r06, _sum6);
                _sum6 = _mm256_comp_fmadd_ps(_k01, _r07, _sum6);
                _sum6 = _mm256_comp_fmadd_ps(_k02, _r08, _sum6);
                _sum6 = _mm256_comp_fmadd_ps(_k10, _r16, _sum6);
                _sum6 = _mm256_comp_fmadd_ps(_k11, _r17, _sum6);
                _sum6 = _mm256_comp_fmadd_ps(_k12, _r18, _sum6);
                _sum6 = _mm256_comp_fmadd_ps(_k20, _r26, _sum6);
                _sum6 = _mm256_comp_fmadd_ps(_k21, _r27, _sum6);
                _sum6 = _mm256_comp_fmadd_ps(_k22, _r28, _sum6);

                __m256 _sum7 = _bias0;
                __m256 _r09 = _mm256_loadu_ps(r0 + 72);
                __m256 _r19 = _mm256_loadu_ps(r1 + 72);
                __m256 _r29 = _mm256_loadu_ps(r2 + 72);
                _mm256_storeu_ps(outptr0 + 48, _sum6);

                _sum7 = _mm256_comp_fmadd_ps(_k00, _r07, _sum7);
                _sum7 = _mm256_comp_fmadd_ps(_k01, _r08, _sum7);
                _sum7 = _mm256_comp_fmadd_ps(_k02, _r09, _sum7);
                _sum7 = _mm256_comp_fmadd_ps(_k10, _r17, _sum7);
                _sum7 = _mm256_comp_fmadd_ps(_k11, _r18, _sum7);
                _sum7 = _mm256_comp_fmadd_ps(_k12, _r19, _sum7);
                _sum7 = _mm256_comp_fmadd_ps(_k20, _r27, _sum7);
                _sum7 = _mm256_comp_fmadd_ps(_k21, _r28, _sum7);
                _sum7 = _mm256_comp_fmadd_ps(_k22, _r29, _sum7);

                _mm256_storeu_ps(outptr0 + 56, _sum7);

                r0 += 64;
                r1 += 64;
                r2 += 64;
                outptr0 += 64;
            }
            // 4 output pixels per iteration.
            for (; j + 3 < outw; j += 4)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                _mm256_storeu_ps(outptr0, _sum0);

                _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);

                __m256 _sum2 = _bias0;
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0 + 8, _sum1);

                _sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2);

                __m256 _sum3 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                _mm256_storeu_ps(outptr0 + 16, _sum2);

                _sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3);

                _mm256_storeu_ps(outptr0 + 24, _sum3);

                r0 += 32;
                r1 += 32;
                r2 += 32;
                outptr0 += 32;
            }
            // 2 output pixels per iteration.
            for (; j + 1 < outw; j += 2)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                _mm256_storeu_ps(outptr0, _sum0);

                _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);

                _mm256_storeu_ps(outptr0 + 8, _sum1);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 16;
            }
            // Remaining single pixels.
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);

                _mm256_storeu_ps(outptr0, _sum0);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr0 += 8;
            }

            // Skip the 2-pixel padding at the end of each input row
            // (8 floats per packed pixel).
            r0 += 2 * 8;
            r1 += 2 * 8;
            r2 += 2 * 8;
        }
    }
}

// Depthwise 3x3 convolution, stride 2, 8-channel (pack8) AVX path.
// Same register layout as the stride-1 kernel, but input pointers advance
// two packed pixels per output pixel; unrolled 4/2/1 output pixels wide.
static void convdw3x3s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // After consuming 2*outw input pixels per row, jump over the remainder
    // of this row plus one full skipped row (stride 2 in height).
    const int tailstep = (w - 2 * outw + w) * 8;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // Per-group bias broadcast to all 8 packed lanes (zero if no bias).
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        __m256 _k00 = _mm256_loadu_ps(k0);
        __m256 _k01 = _mm256_loadu_ps(k0 + 8);
        __m256 _k02 = _mm256_loadu_ps(k0 + 16);
        __m256 _k10 = _mm256_loadu_ps(k0 + 24);
        __m256 _k11 = _mm256_loadu_ps(k0 + 32);
        __m256 _k12 = _mm256_loadu_ps(k0 + 40);
        __m256 _k20 = _mm256_loadu_ps(k0 + 48);
        __m256 _k21 = _mm256_loadu_ps(k0 + 56);
        __m256 _k22 = _mm256_loadu_ps(k0 + 64);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // 4 output pixels per iteration; consecutive outputs start two
            // input pixels apart, sharing one overlapping column.
            for (; j + 3 < outw; j += 4)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0, _sum0);

                _sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1);

                __m256 _sum2 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                __m256 _r06 = _mm256_loadu_ps(r0 + 48);
                __m256 _r16 = _mm256_loadu_ps(r1 + 48);
                __m256 _r26 = _mm256_loadu_ps(r2 + 48);
                _mm256_storeu_ps(outptr0 + 8, _sum1);

                _sum2 = _mm256_comp_fmadd_ps(_k00, _r04, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k01, _r05, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k02, _r06, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k10, _r14, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k11, _r15, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k12, _r16, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k20, _r24, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k21, _r25, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k22, _r26, _sum2);

                __m256 _sum3 = _bias0;
                __m256 _r07 = _mm256_loadu_ps(r0 + 56);
                __m256 _r17 = _mm256_loadu_ps(r1 + 56);
                __m256 _r27 = _mm256_loadu_ps(r2 + 56);
                __m256 _r08 = _mm256_loadu_ps(r0 + 64);
                __m256 _r18 = _mm256_loadu_ps(r1 + 64);
                __m256 _r28 = _mm256_loadu_ps(r2 + 64);
                _mm256_storeu_ps(outptr0 + 16, _sum2);

                _sum3 = _mm256_comp_fmadd_ps(_k00, _r06, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k01, _r07, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k02, _r08, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k10, _r16, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k11, _r17, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k12, _r18, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k20, _r26, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k21, _r27, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k22, _r28, _sum3);

                _mm256_storeu_ps(outptr0 + 24, _sum3);

                r0 += 2 * 32;
                r1 += 2 * 32;
                r2 += 2 * 32;
                outptr0 += 32;
            }
            // 2 output pixels per iteration.
            for (; j + 1 < outw; j += 2)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0, _sum0);

                _sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1);

                _mm256_storeu_ps(outptr0 + 8, _sum1);

                r0 += 2 * 16;
                r1 += 2 * 16;
                r2 += 2 * 16;
                outptr0 += 16;
            }
            // Remaining single pixels.
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);

                _mm256_storeu_ps(outptr0, _sum0);

                r0 += 2 * 8;
                r1 += 2 * 8;
                r2 += 2 * 8;
                outptr0 += 8;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(8*t2-Nz,4)),t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(4*t1+Ny+5,4)),floord(8*t2+Ny+4,4)),floord(8*t1-8*t2+Nz+Ny+3,4));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(8*t2-Nz-508,512)),ceild(4*t3-Ny-508,512));t4<=min(min(min(min(floord(4*t3+Nx,512),floord(Nt+Nx-4,512)),floord(4*t1+Nx+5,512)),floord(8*t2+Nx+4,512)),floord(8*t1-8*t2+Nz+Nx+3,512));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),4*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),4*t3+2),512*t4+510),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(512*t4,t5+1); ubv=min(512*t4+511,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ 
gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
/* ==== yolov2_forward_network.c ==== */
#include "additionally.h"    // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h

// When defined, convolution is computed via im2col + GEMM instead of the naive direct loops.
#define GEMMCONV

/*
// from: box.h
typedef struct {
    float x, y, w, h;
} box;
*/

// binary transpose
// Pads the leading dimension of b up to a multiple of ldb_align, allocates a
// bit-packed buffer into *t_bit_input (ownership transfers to the caller, who
// must free() it), and bit-transposes b into it. Returns the padded element count.
size_t binary_transpose_align_input(int k, int n, float *b, char **t_bit_input, size_t ldb_align, int bit_align)
{
    size_t new_ldb = k + (ldb_align - k % ldb_align); // (k / 8 + 1) * 8;
    size_t t_intput_size = new_ldb * bit_align;// n;
    size_t t_bit_input_size = t_intput_size / 8;// +1;
    // NOTE(review): calloc result is not checked before use — confirm OOM policy upstream.
    *t_bit_input = calloc(t_bit_input_size, sizeof(char));
    //printf("\n t_bit_input_size = %d, k = %d, n = %d, new_ldb = %d \n", t_bit_input_size, k, n, new_ldb);
    int src_size = k * bit_align;   // NOTE(review): computed but unused — presumably debugging leftover.
    transpose_bin(b, *t_bit_input, k, n, bit_align, new_ldb, 8);
    return t_intput_size;
}

// 4 layers in 1: convolution, batch-normalization, BIAS and activation
void forward_convolutional_layer_cpu(layer l, network_state state)
{
    int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1;    // output_height=input_height for stride=1 and pad=1
    int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1;    // output_width=input_width for stride=1 and pad=1
    int i, f, j;

    // fill zero (ALPHA)
    for (i = 0; i < l.outputs * l.batch; ++i) l.output[i] = 0;

    // XNOR-net path: binarize weights (once) and the input, then run the
    // convolution on the binarized copies.
    if (l.xnor) {
        if (!l.align_bit_weights)
        {
            binarize_weights(l.weights, l.n, l.c * l.size * l.size, l.binary_weights);
            //printf("\n binarize_weights l.align_bit_weights = %p \n", l.align_bit_weights);
        }
        binarize_cpu(state.input, l.c * l.h * l.w * l.batch, l.binary_input);

        // l and state are passed by value, so these swaps are local to this call.
        l.weights = l.binary_weights;
        state.input = l.binary_input;
    }

    // l.n - number of filters on this layer
    // l.c - channels of input-array
    // l.h - height of input-array
    // l.w - width of input-array
    // l.size - width and height of filters (the same size for all filters)

    // 1. Convolution !!!
#ifndef GEMMCONV
    int fil;    // filter index
    // "omp parallel for" - automatic parallelization of loop by using OpenMP
    #pragma omp parallel for
    for (fil = 0; fil < l.n; ++fil) {
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < l.c; ++chan)
            // input - y
            for (y = 0; y < l.h; ++y)
                // input - x
                for (x = 0; x < l.w; ++x)
                {
                    int const output_index = fil*l.w*l.h + y*l.w + x;
                    int const weights_pre_index = fil*l.c*l.size*l.size + chan*l.size*l.size;
                    int const input_pre_index = chan*l.w*l.h;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < l.size; ++f_y)
                    {
                        int input_y = y + f_y - l.pad;
                        // filter - x
                        for (f_x = 0; f_x < l.size; ++f_x)
                        {
                            int input_x = x + f_x - l.pad;
                            // zero-padding: skip taps that fall outside the input
                            if (input_y < 0 || input_x < 0 || input_y >= l.h || input_x >= l.w) continue;

                            int input_index = input_pre_index + input_y*l.w + input_x;
                            int weights_index = weights_pre_index + f_y*l.size + f_x;

                            sum += state.input[input_index] * l.weights[weights_index];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    l.output[output_index] += sum;
                }
    }
#else
    // GEMM path: C[m x n] = A[m x k] * B[k x n]
    int m = l.n;                    // rows of A = number of filters
    int k = l.size * l.size * l.c;  // shared dim = filter volume
    int n = out_h * out_w;          // cols of B = output pixels
    float *a = l.weights;
    float *b = state.workspace;     // im2col scratch buffer
    float *c = l.output;

    // convolution as GEMM (as part of BLAS)
    for (i = 0; i < l.batch; ++i) {
        //im2col_cpu(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b);    // im2col.c
        //im2col_cpu_custom(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b);    // AVX2

        // XNOR-net - bit-1: weights, input, calculation
        if (l.xnor && l.align_bit_weights && (l.stride == 1 && l.pad == 1))
        {
            memset(b, 0, l.bit_align * l.size * l.size * l.c * sizeof(float));

            if (l.c % 32 == 0)
            {
                //printf(" l.index = %d - new XNOR \n", l.index);
                int ldb_align = l.lda_align;
                size_t new_ldb = k + (ldb_align - k % ldb_align); // (k / 8 + 1) * 8;
                size_t t_intput_size = new_ldb * l.bit_align;// n;
                size_t t_bit_input_size = t_intput_size / 8;// +1;

                const int new_c = l.c / 32;

                float *re_packed_input = calloc(l.c * l.w * l.h, sizeof(float));
                uint32_t *bin_re_packed_input = calloc(new_c * l.w * l.h + 1, sizeof(uint32_t));

                // float32x4 by channel (as in cuDNN)
                repack_input(state.input, re_packed_input, l.w, l.h, l.c);

                // 32 x floats -> 1 x uint32_t
                float_to_bit(re_packed_input, (char *)bin_re_packed_input, l.c * l.w * l.h);

                free(re_packed_input);

                // slow - convolution the packed inputs and weights: float x 32 by channel (as in cuDNN)
                //convolution_repacked((uint32_t *)bin_re_packed_input, (uint32_t *)l.align_bit_weights, l.output,
                //    l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr);
                // // then exit from if()

                im2col_cpu_custom((float *)bin_re_packed_input, new_c, l.h, l.w, l.size, l.stride, l.pad, b);
                //im2col_cpu((float *)bin_re_packed_input, new_c, l.h, l.w, l.size, l.stride, l.pad, b);

                free(bin_re_packed_input);

                int new_k = l.size * l.size * l.c / 32;

                // good for (l.c == 64)
                //gemm_nn_bin_32bit_packed(m, n, new_k, 1,
                //    l.align_bit_weights, l.new_lda/32,
                //    b, n,
                //    c, n, l.mean_arr);
                // // then exit from if()

                //size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8;
                //size_t t_intput_size = new_ldb * l.bit_align;// n;
                //size_t t_bit_input_size = t_intput_size / 8;// +1;
                char *t_bit_input = calloc(t_bit_input_size, sizeof(char));

                transpose_uint32((uint32_t *)b, t_bit_input, new_k, n, n, new_ldb);

                // the main GEMM function
                gemm_nn_custom_bin_mean_transposed(m, n, k, 1, l.align_bit_weights, new_ldb, t_bit_input, new_ldb, c, n, l.mean_arr);

                // // alternative GEMM
                //gemm_nn_bin_transposed_32bit_packed(m, n, new_k, 1,
                //    l.align_bit_weights, l.new_lda/32,
                //    t_bit_input, new_ldb / 32,
                //    c, n, l.mean_arr);

                free(t_bit_input);
            }
            else
            { // else (l.c % 32 != 0)
                //im2col_cpu_custom_align(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b, l.bit_align);
                im2col_cpu_custom_bin(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b, l.bit_align);

                int ldb_align = l.lda_align;
                size_t new_ldb = k + (ldb_align - k % ldb_align);
                char *t_bit_input = NULL;
                // allocates t_bit_input; freed below
                size_t t_intput_size = binary_transpose_align_input(k, n, b, &t_bit_input, ldb_align, l.bit_align);

                // 5x times faster than gemm()-float32
                gemm_nn_custom_bin_mean_transposed(m, n, k, 1, l.align_bit_weights, new_ldb, t_bit_input, new_ldb, c, n, l.mean_arr);

                //gemm_nn_custom_bin_mean_transposed(m, n, k, 1, bit_weights, k, t_bit_input, new_ldb, c, n, mean_arr);
                //free(t_input);
                free(t_bit_input);
            }
        }
        else {
            // float path: im2col then one small GEMM per filter (parallelized over filters)
            im2col_cpu_custom(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b);    // AVX2

            int t;
            #pragma omp parallel for
            for (t = 0; t < m; ++t) {
                gemm_nn(1, n, k, 1, a + t * k, k, b, n, c + t * n, n);
            }
        }
        // advance to the next image of the batch
        c += n * m;
        state.input += l.c * l.h * l.w;
    }
#endif

    int const out_size = out_h * out_w;

    // 2. Batch normalization
    if (l.batch_normalize) {
        int b;
        for (b = 0; b < l.batch; b++) {
            for (f = 0; f < l.out_c; ++f) {
                for (i = 0; i < out_size; ++i) {
                    int index = f * out_size + i;
                    // normalize with the (inference-time) rolling statistics;
                    // epsilon guards against a zero variance
                    l.output[index + b * l.outputs] = (l.output[index + b * l.outputs] - l.rolling_mean[f]) / (sqrtf(l.rolling_variance[f]) + .000001f);
                }
            }

            // scale_bias
            for (i = 0; i < l.out_c; ++i) {
                for (j = 0; j < out_size; ++j) {
                    l.output[i * out_size + j + b * l.outputs] *= l.scales[i];
                }
            }
        }
    }

    // 3. Add BIAS
    //if (l.batch_normalize)
    {
        int b;
        for (b = 0; b < l.batch; b++) {
            for (i = 0; i < l.n; ++i) {
                for (j = 0; j < out_size; ++j) {
                    l.output[i * out_size + j + b * l.outputs] += l.biases[i];
                }
            }
        }
    }

    // 4. Activation function (LEAKY or LINEAR)
    //if (l.activation == LEAKY) {
    //    for (i = 0; i < l.n*out_size; ++i) {
    //        l.output[i] = leaky_activate(l.output[i]);
    //    }
    //}
    //activate_array_cpu_custom(l.output, l.n*out_size, l.activation);
    activate_array_cpu_custom(l.output, l.outputs * l.batch, l.activation);
}

// MAX pooling layer
void forward_maxpool_layer_cpu(const layer l, network_state state)
{
    // inference: use the vectorized implementation (it does not need l.indexes for backprop)
    if (!state.train) {
        forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch);
        return;
    }

    int b, i, j, k, m, n;
    const int w_offset = -l.pad;
    const int h_offset = -l.pad;

    const int h = l.out_h;
    const int w = l.out_w;
    const int c = l.c;

    // batch index
    for (b = 0; b < l.batch; ++b) {
        // channel index
        for (k = 0; k < c; ++k) {
            // y - input
            for (i = 0; i < h; ++i) {
                // x - input
                for (j = 0; j < w; ++j) {
                    int out_index = j + w * (i + h * (k + c * b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    // pooling x-index
                    for (n = 0; n < l.size; ++n) {
                        // pooling y-index
                        for (m = 0; m < l.size; ++m) {
                            int cur_h = h_offset + i * l.stride + n;
                            int cur_w = w_offset + j * l.stride + m;
                            int index = cur_w + l.w * (cur_h + l.h * (k + b * l.c));
                            int valid = (cur_h >= 0 && cur_h < l.h && cur_w >= 0 && cur_w < l.w);
                            float val = (valid != 0) ? state.input[index] : -FLT_MAX;

                            max_i = (val > max) ? index : max_i;    // get max index
                            max = (val > max) ? val : max;          // get max value
                        }
                    }
                    l.output[out_index] = max;       // store max value
                    l.indexes[out_index] = max_i;    // store max index (for backprop)
                }
            }
        }
    }
}

// Route layer - just copy 1 or more layers into the current layer
void forward_route_layer_cpu(const layer l, network_state state)
{
    int i, j;
    int offset = 0;
    // number of merged layers
    for (i = 0; i < l.n; ++i) {
        int index = l.input_layers[i];                    // source layer index
        float *input = state.net.layers[index].output;    // source layer output ptr
        int input_size = l.input_sizes[i];                // source layer size
        // batch index
        for (j = 0; j < l.batch; ++j) {
            memcpy(l.output + offset + j * l.outputs, input + j * input_size, input_size * sizeof(float));
        }
        offset += input_size;
    }
}

// Reorg layer - just change dimension sizes of the previous layer (some dimension sizes are increased by decreasing other)
void forward_reorg_layer_cpu(const layer l, network_state state)
{
    float *out = l.output;
    float *x = state.input;
    int out_w = l.out_w;
    int out_h = l.out_h;
    int out_c = l.out_c;
    int batch = l.batch;

    int stride = l.stride;
    int b, i, j, k;
    // each output channel group of stride*stride maps back to one input channel
    int in_c = out_c / (stride * stride);

    //printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward);
    //printf("  in_c = %d,  in_w = %d,  in_h = %d \n", in_c, out_w*stride, out_h*stride);

    // batch
    for (b = 0; b < batch; ++b) {
        // channel
        for (k = 0; k < out_c; ++k) {
            // y
            for (j = 0; j < out_h; ++j) {
                // x
                for (i = 0; i < out_w; ++i) {
                    int in_index = i + out_w * (j + out_h * (k + out_c * b));
                    int c2 = k % in_c;
                    int offset = k / in_c;
                    int w2 = i * stride + offset % stride;
                    int h2 = j * stride + offset / stride;
                    int out_index = w2 + out_w * stride * (h2 + out_h * stride * (c2 + in_c * b));
                    out[in_index] = x[out_index];
                }
            }
        }
    }
}

// ---- upsample layer ----

// upsample_layer.c
// Nearest-neighbor upsampling by `stride`; forward=1 writes out from in,
// forward=0 accumulates out back into in (used for the reverse/downsample case).
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int i, j, k, b;
    for (b = 0; b < batch; ++b) {
        for (k = 0; k < c; ++k) {
            for (j = 0; j < h * stride; ++j) {
                for (i = 0; i < w * stride; ++i) {
                    int in_index = b * w * h * c + k * w * h + (j / stride) * w + i / stride;
                    int out_index = b * w * h * c * stride * stride + k * w * h * stride * stride + j * w * stride + i;
                    if (forward) out[out_index] = scale * in[in_index];
                    else in[in_index] += scale * out[out_index];
                }
            }
        }
    }
}

// upsample_layer.c
void forward_upsample_layer_cpu(const layer l, network_state net)
{
    fill_cpu(l.outputs * l.batch, 0, l.output, 1);
    if (l.reverse) {
        upsample_cpu(l.output, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input);
    }
    else {
        upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output);
    }
}

// blas.c (shortcut_layer)
// Adds `add` (w1 x h1 x c1) into `out` (w2 x h2 x c2), sub/super-sampling as
// needed so mismatched resolutions can still be summed element-wise.
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int stride = w1 / w2;
    int sample = w2 / w1;
    assert(stride == h1 / h2);
    assert(sample == h2 / h1);
    if (stride < 1) stride = 1;
    if (sample < 1) sample = 1;

    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int i, j, k, b;
    for (b = 0; b < batch; ++b) {
        for (k = 0; k < minc; ++k) {
            for (j = 0; j < minh; ++j) {
                for (i = 0; i < minw; ++i) {
                    int out_index = i * sample + w2 * (j * sample + h2 * (k + c2 * b));
                    int add_index = i * stride + w1 * (j * stride + h1 * (k + c1 * b));
                    out[out_index] += add[add_index];
                }
            }
        }
    }
}

// blas.c
// Strided copy of N floats: Y[i*INCY] = X[i*INCX].
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for (i = 0; i < N; ++i) Y[i * INCY] = X[i * INCX];
}

// shortcut_layer.c
void forward_shortcut_layer_cpu(const layer l, network_state state)
{
    copy_cpu(l.outputs * l.batch, state.input, 1, l.output, 1);
    shortcut_cpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.output);
    activate_array(l.output, l.outputs * l.batch, l.activation);
}

// ---- yolo layer ----
// Copies the input through and applies logistic activation to the x/y and
// objectness/class channels of each anchor (YOLOv3-style output layer).
void forward_yolo_layer_cpu(const layer l, network_state state)
{
    int b, n;
    memcpy(l.output, state.input, l.outputs * l.batch * sizeof(float));

#ifndef GPU
    for (b = 0; b < l.batch; ++b) {
        for (n = 0; n < l.n; ++n) {
            int index = entry_index(l, b, n * l.w * l.h, 0);
            activate_array(l.output + index, 2 * l.w * l.h, LOGISTIC);
            index = entry_index(l, b, n * l.w * l.h, 4);
            activate_array(l.output + index, (1 + l.classes) * l.w * l.h, LOGISTIC);
        }
    }
#endif
    //memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
}

// ---- region layer ----

// Numerically-stable softmax with temperature over n contiguous floats.
static void softmax_cpu(float *input, int n, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -FLT_MAX;
    for (i = 0; i < n; ++i) {
        if (input[i] > largest) largest = input[i];
    }
    for (i = 0; i < n; ++i) {
        // subtract the max before exp for numerical stability
        float e = expf(input[i] / temp - largest / temp);
        sum += e;
        output[i] = e;
    }
    for (i = 0; i < n; ++i) {
        output[i] /= sum;
    }
}

// Per-group softmax over a class hierarchy (YOLO9000 word tree).
static void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output)
{
    int b;
    for (b = 0; b < batch; ++b) {
        int i;
        int count = 0;
        for (i = 0; i < hierarchy->groups; ++i) {
            int group_size = hierarchy->group_size[i];
            softmax_cpu(input + b * inputs + count, group_size, temp, output + b * inputs + count);
            count += group_size;
        }
    }
}
// ---

// Region layer - just change places of array items, then do logistic_activate and softmax
void forward_region_layer_cpu(const layer l, network_state state)
{
    int i, b;
    int size = l.coords + l.classes + 1;    // 4 Coords(x,y,w,h) + Classes + 1 Probability-t0
    memcpy(l.output, state.input, l.outputs * l.batch * sizeof(float));
    //flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);

    // convert many channels to the one channel (depth=1)
    // (each grid cell will have a number of float-variables equal = to the initial number of channels)
    {
        float *x = l.output;
        int layer_size = l.w * l.h;    // W x H - size of layer
        int layers = size * l.n;       // number of channels (where l.n = number of anchors)
        int batch = l.batch;

        // NOTE(review): calloc result unchecked; also shadows outer i/b on purpose.
        float *swap = calloc(layer_size * layers * batch, sizeof(float));
        int i, c, b;
        // batch index
        for (b = 0; b < batch; ++b) {
            // channel index
            for (c = 0; c < layers; ++c) {
                // layer grid index
                for (i = 0; i < layer_size; ++i) {
                    int i1 = b * layers * layer_size + c * layer_size + i;
                    int i2 = b * layers * layer_size + i * layers + c;
                    swap[i2] = x[i1];    // transpose CHW -> HWC within each batch item
                }
            }
        }
        memcpy(x, swap, layer_size * layers * batch * sizeof(float));
        free(swap);
    }

    // logistic activation only for: t0 (where is t0 = Probability * IoU(box, object))
    for (b = 0; b < l.batch; ++b) {
        // for each item (x, y, anchor-index)
        for (i = 0; i < l.h * l.w * l.n; ++i) {
            int index = size * i + b * l.outputs;
            float x = l.output[index + 4];
            l.output[index + 4] = 1.0F / (1.0F + expf(-x));    // logistic_activate_cpu(l.output[index + 4]);
        }
    }

    if (l.softmax_tree) {    // Yolo 9000
        for (b = 0; b < l.batch; ++b) {
            for (i = 0; i < l.h * l.w * l.n; ++i) {
                int index = size * i + b * l.outputs;
                softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
            }
        }
    }
    else if (l.softmax) {    // Yolo v2
        // softmax activation only for Classes probability
        for (b = 0; b < l.batch; ++b) {
            // for each item (x, y, anchor-index)
            for (i = 0; i < l.h * l.w * l.n; ++i) {
                int index = size * i + b * l.outputs;
                softmax_cpu(l.output + index + 5, l.classes, 1, l.output + index + 5);
            }
        }
    }
}

// Runs every layer of the network in sequence on the CPU, feeding each
// layer's output to the next via state.input.
void yolov2_forward_network_cpu(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    for (i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];

        if (l.type == CONVOLUTIONAL) {
            forward_convolutional_layer_cpu(l, state);
            //printf("\n CONVOLUTIONAL \t\t l.size = %d  \n", l.size);
        }
        else if (l.type == MAXPOOL) {
            forward_maxpool_layer_cpu(l, state);
            //printf("\n MAXPOOL \t\t l.size = %d  \n", l.size);
        }
        else if (l.type == ROUTE) {
            forward_route_layer_cpu(l, state);
            //printf("\n ROUTE \t\t\t l.n = %d  \n", l.n);
        }
        else if (l.type == REORG) {
            forward_reorg_layer_cpu(l, state);
            //printf("\n REORG \n");
        }
        else if (l.type == UPSAMPLE) {
            forward_upsample_layer_cpu(l, state);
            //printf("\n UPSAMPLE \n");
        }
        else if (l.type == SHORTCUT) {
            forward_shortcut_layer_cpu(l, state);
            //printf("\n SHORTCUT \n");
        }
        else if (l.type == YOLO) {
            forward_yolo_layer_cpu(l, state);
            //printf("\n YOLO \n");
        }
        else if (l.type == REGION) {
            forward_region_layer_cpu(l, state);
            //printf("\n REGION \n");
        }
        else {
            // unsupported layer type: report and fall through (l.output is still forwarded)
            printf("\n layer: %d \n", l.type);
        }
        state.input = l.output;
    }
}

// detect on CPU
// Returns the output of the last non-COST layer (owned by the network, do not free).
float *network_predict_cpu(network net, float *input)
{
    network_state state;
    state.net = net;
    state.index = 0;
    state.input = input;
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    yolov2_forward_network_cpu(net, state);    // network on CPU
    //float *out = get_network_output(net);
    int i;
    for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
    return net.layers[i].output;
}

// --------------------
// x - last conv-layer output
// biases - anchors from cfg-file
// n - number of anchors from cfg-file
// Decodes one predicted box (relative coordinates in [0,1] of the last layer grid).
box get_region_box_cpu(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    box b;
    b.x = (i + logistic_activate(x[index + 0])) / w;    // (col + 1./(1. + exp(-x))) / width_last_layer
    b.y = (j + logistic_activate(x[index + 1])) / h;    // (row + 1./(1. + exp(-x))) / height_last_layer
    b.w = expf(x[index + 2]) * biases[2 * n] / w;       // exp(x) * anchor_w / width_last_layer
    b.h = expf(x[index + 3]) * biases[2 * n + 1] / h;   // exp(x) * anchor_h / height_last_layer
    return b;
}

// get prediction boxes
// Fills boxes[] (scaled to w x h) and probs[][] from the region layer output.
void get_region_boxes_cpu(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
    int i;
    float *const predictions = l.output;
    // grid index
    #pragma omp parallel for
    for (i = 0; i < l.w * l.h; ++i) {
        int j, n;
        int row = i / l.w;
        int col = i % l.w;
        // anchor index
        for (n = 0; n < l.n; ++n) {
            int index = i * l.n + n;    // index for each grid-cell & anchor
            int p_index = index * (l.classes + 5) + 4;
            float scale = predictions[p_index];    // scale = t0 = Probability * IoU(box, object)
            if (l.classfix == -1 && scale < .5) scale = 0;    // if(t0 < 0.5) t0 = 0;

            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box_cpu(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            // rescale from grid-relative to image coordinates
            boxes[index].x *= w;
            boxes[index].y *= h;
            boxes[index].w *= w;
            boxes[index].h *= h;

            int class_index = index * (l.classes + 5) + 5;

            // Yolo 9000 or Yolo v2
            if (l.softmax_tree) {
                // Yolo 9000
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
                int found = 0;
                if (map) {
                    // NOTE(review): hard-coded 200 classes — presumably the 9000-to-200 detection map; confirm against caller.
                    for (j = 0; j < 200; ++j) {
                        float prob = scale * predictions[class_index + map[j]];
                        probs[index][j] = (prob > thresh) ? prob : 0;
                    }
                }
                else {
                    // keep only the first class (scanning from the last) whose prob > .5
                    for (j = l.classes - 1; j >= 0; --j) {
                        if (!found && predictions[class_index + j] > .5) {
                            found = 1;
                        }
                        else {
                            predictions[class_index + j] = 0;
                        }
                        float prob = predictions[class_index + j];
                        probs[index][j] = (scale > thresh) ? prob : 0;
                    }
                }
            }
            else {
                // Yolo v2
                for (j = 0; j < l.classes; ++j) {
                    float prob = scale * predictions[class_index + j];    // prob = IoU(box, object) = t0 * class-probability
                    probs[index][j] = (prob > thresh) ? prob : 0;         // if (IoU < threshold) IoU = 0;
                }
            }
            if (only_objectness) {
                probs[index][0] = scale;
            }
        }
    }
}

// ------ Calibration --------

// detect on CPU
// Same forward pass as network_predict_cpu, but (when net.do_input_calibration > 0)
// it also accumulates per-conv-layer input quantization multipliers across calls
// and writes them to input_calibration.txt once enough samples are collected.
// NOTE: uses static state, so this function is not reentrant/thread-safe.
float *network_calibrate_cpu(network net, float *input)
{
    network_state state;
    state.net = net;
    state.index = 0;
    state.input = input;
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    //yolov2_forward_network_cpu(net, state);    // network on CPU

    // input calibration - for quantinization
    static int max_num = 100;
    static int counter = 0;
    static float *input_mult_array = NULL;
    if (net.do_input_calibration > 0) {
        // calibration for quantinization
        max_num = net.do_input_calibration;
        if (input_mult_array == NULL) {
            input_mult_array = (float *) calloc(net.n * max_num, sizeof(float));
        }
        ++counter;

        // save calibration coefficients
        if (counter > max_num) {
            printf("\n\n Saving coefficients to the input_calibration.txt file... \n\n");
            FILE *fw = fopen("input_calibration.txt", "wb");
            char buff[1024];
            //printf("\n float input_mult[] = { ");
            char *str1 = "input_calibration = ";
            printf("%s", str1);
            fwrite(str1, sizeof(char), strlen(str1), fw);
            int i;
            for (i = 0; i < net.n; ++i)
                if (net.layers[i].type == CONVOLUTIONAL) {
                    printf("%g, ", input_mult_array[0 + i * max_num]);
                    sprintf(buff, "%g, ", input_mult_array[0 + i * max_num]);
                    fwrite(buff, sizeof(char), strlen(buff), fw);
                }
            char *str2 = "16";
            printf("%s \n ---------------------------", str2);
            fwrite(str2, sizeof(char), strlen(str2), fw);
            fclose(fw);
            getchar();
            exit(0);    // calibration run ends here by design
        }
    }

    state.workspace = net.workspace;
    int i;
    for (i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];

        if (l.type == CONVOLUTIONAL) {
            if (net.do_input_calibration) {
                // calibration for quantinization
                //float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 8192, 2048);
                float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 16, 4096);
                //float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 4, 2*4096);
                printf(" multiplier = %f, l.inputs = %d \n\n", multiplier, l.inputs);
                input_mult_array[counter + i * max_num] = multiplier;
                // on the last sample, average all collected multipliers into slot 0
                if (counter >= max_num) {
                    int j;
                    float res_mult = 0;
                    for (j = 0; j < max_num; ++j) res_mult += input_mult_array[j + i * max_num];
                    res_mult = res_mult / max_num;
                    input_mult_array[0 + i * max_num] = res_mult;
                    printf(" res_mult = %f, max_num = %d \n", res_mult, max_num);
                }
            }
            forward_convolutional_layer_cpu(l, state);
            //printf("\n CONVOLUTIONAL \t\t l.size = %d  \n", l.size);
        }
        else if (l.type == MAXPOOL) {
            forward_maxpool_layer_cpu(l, state);
            //printf("\n MAXPOOL \t\t l.size = %d  \n", l.size);
        }
        else if (l.type == ROUTE) {
            forward_route_layer_cpu(l, state);
            //printf("\n ROUTE \t\t\t l.n = %d  \n", l.n);
        }
        else if (l.type == REORG) {
            forward_reorg_layer_cpu(l, state);
            //printf("\n REORG \n");
        }
        else if (l.type == REGION) {
            forward_region_layer_cpu(l, state);
            //printf("\n REGION \n");
        }
        else {
            printf("\n layer: %d \n", l.type);
        }
        state.input = l.output;
    }
    //int i;
    for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
    return net.layers[i].output;
}