source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_bitmap_AxB_saxpy_A_sparse_B_bitmap_template.c
//------------------------------------------------------------------------------ // GB_bitmap_AxB_saxpy_A_sparse_B_bitmap: C<#M>+=A*B, C bitmap, M any format //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { if (use_coarse_tasks) { //---------------------------------------------------------------------- // C<#M> += A*B using coarse tasks //---------------------------------------------------------------------- // number of columns in the workspace for each task #define GB_PANEL_SIZE 4 //---------------------------------------------------------------------- // allocate workspace for each task //---------------------------------------------------------------------- GH_slice = GB_MALLOC (2*ntasks, int64_t) ; if (GH_slice == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } int64_t *GB_RESTRICT G_slice = GH_slice ; int64_t *GB_RESTRICT H_slice = GH_slice + ntasks ; int64_t gwork = 0 ; int64_t hwork = 0 ; int tid ; for (tid = 0 ; tid < ntasks ; tid++) { int64_t jstart, jend ; GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ; int64_t jtask = jend - jstart ; int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ; G_slice [tid] = gwork ; H_slice [tid] = hwork ; if (jpanel > 1) { // no need to allocate workspace for Gb and Gx if jpanel == 1 gwork += jpanel ; } hwork += jpanel ; } int64_t bvlenx = (B_is_pattern ? 0 : bvlen) * GB_BSIZE ; int64_t cvlenx = (GB_IS_ANY_PAIR_SEMIRING ? 0 : cvlen) * GB_CSIZE ; int64_t bvlenb = (GB_B_IS_BITMAP ? 
bvlen : 0) ; size_t gfspace = gwork * bvlenb ; size_t wfspace = gfspace + hwork * cvlen ; size_t wbxspace = gwork * bvlenx ; size_t wcxspace = hwork * cvlenx ; Wf = GB_MALLOC (wfspace, int8_t) ; Wbx = GB_MALLOC (wbxspace, GB_void) ; Wcx = GB_MALLOC (wcxspace, GB_void) ; if (Wf == NULL || Wcx == NULL || Wbx == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // C<#M> += A*B //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (tid = 0 ; tid < ntasks ; tid++) { //------------------------------------------------------------------ // determine the vectors of B and C for this coarse task //------------------------------------------------------------------ int64_t jstart, jend ; GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ; int64_t jtask = jend - jstart ; int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ; int64_t task_cnvals = 0 ; //------------------------------------------------------------------ // get the workspace for this task //------------------------------------------------------------------ // Gb and Gx workspace to load the panel of B int8_t *GB_RESTRICT Gb = Wf + G_slice [tid] * bvlenb ; GB_BTYPE *GB_RESTRICT Gx = (GB_BTYPE *) (Wbx + G_slice [tid] * bvlenx) ; // Hf and Hx workspace to compute the panel of C int8_t *GB_RESTRICT Hf = Wf + (H_slice [tid] * cvlen) + gfspace ; GB_CTYPE *GB_RESTRICT Hx = (GB_CTYPE *) (Wcx + H_slice [tid] * cvlenx) ; #if GB_IS_PLUS_FC32_MONOID float *GB_RESTRICT Hx_real = (float *) Hx ; float *GB_RESTRICT Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID double *GB_RESTRICT Hx_real = (double *) Hx ; double *GB_RESTRICT Hx_imag = Hx_real + 1 ; #endif //------------------------------------------------------------------ // clear the panel //------------------------------------------------------------------ memset (Hf, 0, 
jpanel * cvlen) ; //------------------------------------------------------------------ // C<#M>(:,jstart:jend-1) += A * B(:,jstart:jend-1) by panel //------------------------------------------------------------------ for (int64_t j1 = jstart ; j1 < jend ; j1 += jpanel) { //-------------------------------------------------------------- // get the panel of np vectors j1:j2-1 //-------------------------------------------------------------- int64_t j2 = GB_IMIN (jend, j1 + jpanel) ; int64_t np = j2 - j1 ; //-------------------------------------------------------------- // load and transpose B(:,j1:j2-1) for one panel //-------------------------------------------------------------- #if GB_B_IS_BITMAP { if (np == 1) { // no need to load a single vector of B Gb = (int8_t *) (Bb + (j1 * bvlen)) ; } else { // load and transpose the bitmap of B(:,j1:j2-1) for (int64_t jj = 0 ; jj < np ; jj++) { int64_t j = j1 + jj ; for (int64_t i = 0 ; i < bvlen ; i++) { Gb [i*np + jj] = Bb [i + j * bvlen] ; } } } } #endif if (!B_is_pattern) { if (np == 1) { // no need to load a single vector of B GB_void *GB_RESTRICT Bx = B->x ; Gx = (GB_BTYPE *) (Bx + (j1 * bvlen) * GB_BSIZE) ; } else { // load and transpose the values of B(:,j1:j2-1) for (int64_t jj = 0 ; jj < np ; jj++) { int64_t j = j1 + jj ; for (int64_t i = 0 ; i < bvlen ; i++) { // G(i,jj) = B(i,j), and change storage order int64_t pG = i*np + jj ; int64_t pB = i + j * bvlen ; GB_LOADB (Gx, pG, Bx, pB) ; } } } } //-------------------------------------------------------------- // H = A*G for one panel //-------------------------------------------------------------- for (int64_t kA = 0 ; kA < anvec ; kA++) { //---------------------------------------------------------- // get A(:,k) //---------------------------------------------------------- int64_t k = GBH (Ah, kA) ; int64_t pA = Ap [kA] ; int64_t pA_end = Ap [kA+1] ; int64_t pG = k * np ; #undef GB_MULT_A_ik_G_kjj #if GB_IS_PAIR_MULTIPLIER // t = A(i,k) * G (k,jj) is always equal to 
1 #define GB_MULT_A_ik_G_kjj(jj) #else // t = A(i,k) * G (k,jj) GB_CIJ_DECLARE (t) ; #define GB_MULT_A_ik_G_kjj(jj) \ GB_GETB (gkj, Gx, pG+jj) ; \ GB_MULT (t, aik, gkj, i, k, j1 + jj) ; #endif #undef GB_HX_COMPUTE #define GB_HX_COMPUTE(jj) \ { \ /* H (i,jj) += A(i,k)*G(k,jj) */ \ if (!GB_B_IS_BITMAP || Gb [pG+jj]) \ { \ GB_MULT_A_ik_G_kjj (jj) ; \ if (Hf [pH+jj] == 0) \ { \ /* H(i,jj) is a new entry */ \ GB_HX_WRITE (pH+jj, t) ; /* Hx(i,jj)=t */ \ Hf [pH+jj] = 1 ; \ } \ else \ { \ /* H(i,jj) is already present */ \ GB_HX_UPDATE (pH+jj, t) ; /* Hx(i,jj)+=t */ \ } \ } \ } #undef GB_LOAD_A_ij #define GB_LOAD_A_ij \ int64_t i = Ai [pA] ; \ GB_GETA (aik, Ax, pA) ; \ int64_t pH = i * np ; //---------------------------------------------------------- // H += A(:,k)*G(k,:) //---------------------------------------------------------- #if GB_B_IS_BITMAP bool gb = false ; switch (np) { case 4 : gb = Gb [pG+3] ; case 3 : gb |= Gb [pG+2] ; case 2 : gb |= Gb [pG+1] ; case 1 : gb |= Gb [pG ] ; default: ; } if (gb) #endif { switch (np) { case 4 : for ( ; pA < pA_end ; pA++) { GB_LOAD_A_ij ; GB_HX_COMPUTE (0) ; GB_HX_COMPUTE (1) ; GB_HX_COMPUTE (2) ; GB_HX_COMPUTE (3) ; } break ; case 3 : for ( ; pA < pA_end ; pA++) { GB_LOAD_A_ij ; GB_HX_COMPUTE (0) ; GB_HX_COMPUTE (1) ; GB_HX_COMPUTE (2) ; } break ; case 2 : for ( ; pA < pA_end ; pA++) { GB_LOAD_A_ij ; GB_HX_COMPUTE (0) ; GB_HX_COMPUTE (1) ; } break ; case 1 : for ( ; pA < pA_end ; pA++) { GB_LOAD_A_ij ; GB_HX_COMPUTE (0) ; } break ; default:; } } #undef GB_MULT_A_ik_G_kjj #undef GB_HX_COMPUTE #undef GB_LOAD_A_ij } //-------------------------------------------------------------- // C<#M>(:,j1:j2-1) += H //-------------------------------------------------------------- for (int64_t jj = 0 ; jj < np ; jj++) { //---------------------------------------------------------- // C<#M>(:,j) += H (:,jj) //---------------------------------------------------------- int64_t j = j1 + jj ; int64_t pC_start = j * avlen ; // get pointer to C(:,j) 
for (int64_t i = 0 ; i < cvlen ; i++) { int64_t pC = pC_start + i ; // pointer to C(i,j) int64_t pH = i * np + jj ; // pointer to H(i,jj) if (!Hf [pH]) continue ; Hf [pH] = 0 ; // clear the panel int8_t cb = Cb [pC] ; //------------------------------------------------------ // check M(i,j) //------------------------------------------------------ #if GB_MASK_IS_SPARSE_OR_HYPER // M is sparse or hypersparse bool mij = ((cb & 2) != 0) ^ Mask_comp ; if (!mij) continue ; cb = (cb & 1) ; #elif GB_MASK_IS_BITMAP_OR_FULL // M is bitmap or full GB_GET_M_ij (pC) ; mij = mij ^ Mask_comp ; if (!mij) continue ; #endif //------------------------------------------------------ // C(i,j) += H(i,jj) //------------------------------------------------------ if (cb == 0) { // C(i,j) = H(i,jj) #if GB_IS_ANY_PAIR_SEMIRING Cx [pC] = GB_CTYPE_CAST (1, 0) ; // C(i,j) = 1 #else GB_CIJ_GATHER (pC, pH) ; #endif Cb [pC] = keep ; task_cnvals++ ; } else { // Currently, the matrix C is a newly allocated // matrix, not the C_in input matrix to GrB_mxm. // As a result, this condition is not used. It // will be in the future when this method is // modified to modify C in-place. ASSERT (GB_DEAD_CODE) ; // C(i,j) += H(i,jj) GB_CIJ_GATHER_UPDATE (pC, pH) ; } } } } cnvals += task_cnvals ; } #undef GB_PANEL_SIZE } else if (use_atomics) { //---------------------------------------------------------------------- // C<#M> += A*B using fine tasks and atomics //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (tid = 0 ; tid < ntasks ; tid++) { //------------------------------------------------------------------ // determine the vector of B and C for this fine task //------------------------------------------------------------------ // The fine task operates on C(:,j) and B(:,j). 
Its fine task // id ranges from 0 to nfine_tasks_per_vector-1, and determines // which slice of A to operate on. int64_t j = tid / nfine_tasks_per_vector ; int fine_tid = tid % nfine_tasks_per_vector ; int64_t kfirst = A_slice [fine_tid] ; int64_t klast = A_slice [fine_tid + 1] ; int64_t pB_start = j * bvlen ; // pointer to B(:,j) int64_t pC_start = j * avlen ; // pointer to C(:,j) GB_GET_T_FOR_SECONDJ ; // t = j or j+1 for SECONDJ* int64_t task_cnvals = 0 ; // for Hx Gustavason workspace: use C(:,j) in-place: GB_CTYPE *GB_RESTRICT Hx = (GB_CTYPE *) (((GB_void *) Cx) + (pC_start * GB_CSIZE)) ; #if GB_IS_PLUS_FC32_MONOID || GB_IS_ANY_FC32_MONOID float *GB_RESTRICT Hx_real = (float *) Hx ; float *GB_RESTRICT Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID || GB_IS_ANY_FC64_MONOID double *GB_RESTRICT Hx_real = (double *) Hx ; double *GB_RESTRICT Hx_imag = Hx_real + 1 ; #endif //------------------------------------------------------------------ // C<#M>(:,j) += A(:,k1:k2) * B(k1:k2,j) //------------------------------------------------------------------ for (int64_t kk = kfirst ; kk < klast ; kk++) { //-------------------------------------------------------------- // C<#M>(:,j) += A(:,k) * B(k,j) //-------------------------------------------------------------- int64_t k = GBH (Ah, kk) ; // k in range k1:k2 int64_t pB = pB_start + k ; // get pointer to B(k,j) if (!GBB (Bb, pB)) continue ; int64_t pA = Ap [kk] ; int64_t pA_end = Ap [kk+1] ; GB_GET_B_kj ; // bkj = B(k,j) for ( ; pA < pA_end ; pA++) { //---------------------------------------------------------- // get A(i,k) and C(i,j) //---------------------------------------------------------- int64_t i = Ai [pA] ; // get A(i,k) index int64_t pC = pC_start + i ; // get C(i,j) pointer int8_t cb ; //---------------------------------------------------------- // C<#M>(i,j) += A(i,k) * B(k,j) //---------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER { 
//------------------------------------------------------ // M is sparse, and scattered into the C bitmap //------------------------------------------------------ // finite-state machine in Cb [pC]: // 0: cij not present, mij zero // 1: cij present, mij zero (keep==1 for !M) // 2: cij not present, mij one // 3: cij present, mij one (keep==3 for M) // 7: cij is locked #if GB_HAS_ATOMIC { // if C(i,j) is already present and can be modified // (cb==keep), and the monoid can be done // atomically, then do the atomic update. No need // to modify Cb [pC]. GB_ATOMIC_READ cb = Cb [pC] ; // grab the entry if (cb == keep) { #if !GB_IS_ANY_MONOID GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j) GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t #endif continue ; // C(i,j) has been updated } } #endif do // lock the entry { // do this atomically: // { cb = Cb [pC] ; Cb [pC] = 7 ; } GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ; } while (cb == 7) ; // lock owner gets 0, 1, 2, or 3 if (cb == keep-1) { // C(i,j) is a new entry GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) #if GB_IS_ANY_PAIR_SEMIRING GB_ATOMIC_SET_HX_ONE (i) ; // C(i,j) = 1 #else GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t #endif task_cnvals++ ; cb = keep ; // keep the entry } else if (cb == keep) { // C(i,j) is already present #if !GB_IS_ANY_MONOID GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t #endif } GB_ATOMIC_WRITE Cb [pC] = cb ; // unlock the entry } #else { //------------------------------------------------------ // M is not present, or bitmap/full //------------------------------------------------------ // finite-state machine in Cb [pC]: // 0: cij not present; can be written // 1: cij present; can be updated // 7: cij is locked #if GB_MASK_IS_BITMAP_OR_FULL { // M is bitmap or full, and not in C bitmap. 
// Do not modify C(i,j) if not permitted by the mask GB_GET_M_ij (pC) ; mij = mij ^ Mask_comp ; if (!mij) continue ; } #endif //------------------------------------------------------ // C(i,j) += A(i,j) * B(k,j) //------------------------------------------------------ #if GB_HAS_ATOMIC { // if C(i,j) is already present (cb==1), and the // monoid can be done atomically, then do the // atomic update. No need to modify Cb [pC]. GB_ATOMIC_READ cb = Cb [pC] ; // grab the entry if (cb == 1) { #if !GB_IS_ANY_MONOID GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j) GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t #endif continue ; // C(i,j) has been updated } } #endif do // lock the entry { // do this atomically: // { cb = Cb [pC] ; Cb [pC] = 7 ; } GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ; } while (cb == 7) ; // lock owner gets 0 or 1 if (cb == 0) { // C(i,j) is a new entry GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) #if GB_IS_ANY_PAIR_SEMIRING GB_ATOMIC_SET_HX_ONE (i) ; // C(i,j) = 1 #else GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t #endif task_cnvals++ ; } else // cb == 1 { // C(i,j) is already present #if !GB_IS_ANY_MONOID GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t #endif } GB_ATOMIC_WRITE Cb [pC] = 1 ; // unlock the entry } #endif } } cnvals += task_cnvals ; } } else { //---------------------------------------------------------------------- // C<#M> += A*B using fine tasks and workspace, with no atomics //---------------------------------------------------------------------- // Each fine task is given size-cvlen workspace to compute its result // in the first phase, W(:,tid) = A(:,k1:k2) * B(k1:k2,j), where k1:k2 // is defined by the fine_tid of the task. The workspaces are then // summed into C in the second phase. 
//---------------------------------------------------------------------- // allocate workspace //---------------------------------------------------------------------- size_t workspace = cvlen * ntasks ; Wf = GB_CALLOC (workspace, int8_t) ; size_t cxsize = (GB_IS_ANY_PAIR_SEMIRING) ? 0 : GB_CSIZE ; Wcx = GB_MALLOC (workspace * cxsize, GB_void) ; if (Wf == NULL || Wcx == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // first phase: W (:,tid) = A (:,k1:k2) * B (k2:k2,j) for each fine task //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { //------------------------------------------------------------------ // determine the vector of B and C for this fine task //------------------------------------------------------------------ // The fine task operates on C(:,j) and B(:,j). Its fine task // id ranges from 0 to nfine_tasks_per_vector-1, and determines // which slice of A to operate on. 
int64_t j = tid / nfine_tasks_per_vector ; int fine_tid = tid % nfine_tasks_per_vector ; int64_t kfirst = A_slice [fine_tid] ; int64_t klast = A_slice [fine_tid + 1] ; int64_t pB_start = j * bvlen ; // pointer to B(:,j) int64_t pC_start = j * avlen ; // pointer to C(:,j), for bitmap int64_t pW_start = tid * avlen ; // pointer to W(:,tid) GB_GET_T_FOR_SECONDJ ; // t = j or j+1 for SECONDJ* int64_t task_cnvals = 0 ; // for Hf and Hx Gustavason workspace: use W(:,tid): int8_t *GB_RESTRICT Hf = Wf + pW_start ; GB_CTYPE *GB_RESTRICT Hx = (GB_CTYPE *) (Wcx + (pW_start * cxsize)) ; #if GB_IS_PLUS_FC32_MONOID float *GB_RESTRICT Hx_real = (float *) Hx ; float *GB_RESTRICT Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID double *GB_RESTRICT Hx_real = (double *) Hx ; double *GB_RESTRICT Hx_imag = Hx_real + 1 ; #endif //------------------------------------------------------------------ // W<#M> = A(:,k1:k2) * B(k1:k2,j) //------------------------------------------------------------------ for (int64_t kk = kfirst ; kk < klast ; kk++) { //-------------------------------------------------------------- // W<#M>(:,tid) += A(:,k) * B(k,j) //-------------------------------------------------------------- int64_t k = GBH (Ah, kk) ; // k in range k1:k2 int64_t pB = pB_start + k ; // get pointer to B(k,j) if (!GBB (Bb, pB)) continue ; int64_t pA = Ap [kk] ; int64_t pA_end = Ap [kk+1] ; GB_GET_B_kj ; // bkj = B(k,j) for ( ; pA < pA_end ; pA++) { //---------------------------------------------------------- // get A(i,k) //---------------------------------------------------------- int64_t i = Ai [pA] ; // get A(i,k) index //---------------------------------------------------------- // check M(i,j) //---------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER { // M is sparse or hypersparse int64_t pC = pC_start + i ; int8_t cb = Cb [pC] ; bool mij = ((cb & 2) != 0) ^ Mask_comp ; if (!mij) continue ; } #elif GB_MASK_IS_BITMAP_OR_FULL { // M is bitmap or full 
int64_t pC = pC_start + i ; GB_GET_M_ij (pC) ; mij = mij ^ Mask_comp ; if (!mij) continue ; } #endif //---------------------------------------------------------- // W<#M>(i) += A(i,k) * B(k,j) //---------------------------------------------------------- #if GB_IS_ANY_PAIR_SEMIRING { // Hx is not used; Cx [...] = 1 is done below Hf [i] = 1 ; } #else { GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j) if (Hf [i] == 0) { // W(i,j) is a new entry GB_HX_WRITE (i, t) ; // Hx(i) = t Hf [i] = 1 ; } else { // W(i) is already present GB_HX_UPDATE (i, t) ; // Hx(i) += t } } #endif } } } //---------------------------------------------------------------------- // second phase: C<#M> += reduce (W) //---------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (tid = 0 ; tid < ntasks ; tid++) { //------------------------------------------------------------------ // determine the W and C for this fine task //------------------------------------------------------------------ // The fine task operates on C(i1:i2,j) and W(i1:i2,w1:w2), where // i1:i2 is defined by the fine task id. Its fine task id ranges // from 0 to nfine_tasks_per_vector-1. // w1:w2 are the updates to C(:,j), where w1:w2 = // [j*nfine_tasks_per_vector : (j+1)*nfine_tasks_per_vector-1]. 
int64_t j = tid / nfine_tasks_per_vector ; int fine_tid = tid % nfine_tasks_per_vector ; int64_t istart, iend ; GB_PARTITION (istart, iend, cvlen, fine_tid, nfine_tasks_per_vector) ; int64_t pC_start = j * cvlen ; // pointer to C(:,j) int64_t wstart = j * nfine_tasks_per_vector ; int64_t wend = (j + 1) * nfine_tasks_per_vector ; int64_t task_cnvals = 0 ; // Hx = (typecasted) Wcx workspace, use Wf as-is GB_CTYPE *GB_RESTRICT Hx = ((GB_CTYPE *) Wcx) ; #if GB_IS_PLUS_FC32_MONOID float *GB_RESTRICT Hx_real = (float *) Hx ; float *GB_RESTRICT Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID double *GB_RESTRICT Hx_real = (double *) Hx ; double *GB_RESTRICT Hx_imag = Hx_real + 1 ; #endif //------------------------------------------------------------------ // C<#M>(i1:i2,j) += reduce (W (i2:i2, wstart:wend)) //------------------------------------------------------------------ for (int64_t w = wstart ; w < wend ; w++) { //-------------------------------------------------------------- // C<#M>(i1:i2,j) += W (i1:i2,w) //-------------------------------------------------------------- int64_t pW_start = w * cvlen ; // pointer to W (:,w) for (int64_t i = istart ; i < iend ; i++) { //---------------------------------------------------------- // get pointer and bitmap C(i,j) and W(i,w) //---------------------------------------------------------- int64_t pW = pW_start + i ; // pointer to W(i,w) if (Wf [pW] == 0) continue ; // skip if not present int64_t pC = pC_start + i ; // pointer to C(i,j) int8_t cb = Cb [pC] ; // bitmap status of C(i,j) //---------------------------------------------------------- // M(i,j) already checked, but adjust Cb if M is sparse //---------------------------------------------------------- #if GB_MASK_IS_SPARSE_OR_HYPER { // M is sparse or hypersparse cb = (cb & 1) ; } #endif //---------------------------------------------------------- // C(i,j) += W (i,w) //---------------------------------------------------------- if (cb == 0) { // C(i,j) = W(i,w) 
#if GB_IS_ANY_PAIR_SEMIRING Cx [pC] = GB_CTYPE_CAST (1, 0) ; // C(i,j) = 1 #else GB_CIJ_GATHER (pC, pW) ; #endif Cb [pC] = keep ; task_cnvals++ ; } else { // C(i,j) += W(i,w) GB_CIJ_GATHER_UPDATE (pC, pW) ; } } } cnvals += task_cnvals ; } } }
parallel-inl.h
// // parallel-inl.h // DigitalRender // // Created by 杨丰 on 2020/11/3. // #ifndef parallel_inl_h #define parallel_inl_h #include "constants.h" #include <algorithm> #include <functional> #include <future> #include <vector> #define JET_TASKING_TBB true #ifdef JET_TASKING_TBB #include <tbb/parallel_for.h> #include <tbb/parallel_reduce.h> #include <tbb/parallel_sort.h> #include <tbb/task.h> #elif defined(JET_TASKING_CPP11THREADS) #include <thread> #endif namespace internal { // NOTE - This abstraction takes a lambda which should take captured // variables by *value* to ensure no captured references race // with the task itself. template <typename TASK_T> inline void schedule(TASK_T&& fcn) { #ifdef JET_TASKING_TBB struct LocalTBBTask : public tbb::task { TASK_T func; tbb::task* execute() override { func(); return nullptr; } LocalTBBTask(TASK_T&& f) : func(std::forward<TASK_T>(f)) {} }; auto* tbb_node = new (tbb::task::allocate_root()) LocalTBBTask(std::forward<TASK_T>(fcn)); tbb::task::enqueue(*tbb_node); #elif defined(JET_TASKING_CPP11THREADS) std::thread thread(fcn); thread.detach(); #else // OpenMP or Serial --> synchronous! fcn(); #endif } template <typename TASK_T> using operator_return_t = typename std::result_of<TASK_T()>::type; // NOTE - see above, same issues associated with schedule() template <typename TASK_T> inline auto async(TASK_T&& fcn) -> std::future<operator_return_t<TASK_T>> { using package_t = std::packaged_task<operator_return_t<TASK_T>()>; auto task = new package_t(std::forward<TASK_T>(fcn)); auto future = task->get_future(); schedule([=]() { (*task)(); delete task; }); return future; } // Adopted from: // Radenski, A. // Shared Memory, Message Passing, and Hybrid Merge Sorts for Standalone and // Clustered SMPs. Proc PDPTA'11, the 2011 International Conference on Parallel // and Distributed Processing Techniques and Applications, CSREA Press // (H. Arabnia, Ed.), 2011, pp. 367 - 373. 
template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void merge(RandomIterator a, size_t size, RandomIterator2 temp, CompareFunction compareFunction) { size_t i1 = 0; size_t i2 = size / 2; size_t tempi = 0; while (i1 < size / 2 && i2 < size) { if (compareFunction(a[i1], a[i2])) { temp[tempi] = a[i1]; i1++; } else { temp[tempi] = a[i2]; i2++; } tempi++; } while (i1 < size / 2) { temp[tempi] = a[i1]; i1++; tempi++; } while (i2 < size) { temp[tempi] = a[i2]; i2++; tempi++; } // Copy sorted temp array into main array, a parallelFor(kZeroSize, size, [&](size_t i) { a[i] = temp[i]; }); } template <typename RandomIterator, typename RandomIterator2, typename CompareFunction> void parallelMergeSort(RandomIterator a, size_t size, RandomIterator2 temp, unsigned int numThreads, CompareFunction compareFunction) { if (numThreads == 1) { std::sort(a, a + size, compareFunction); } else if (numThreads > 1) { std::vector<std::future<void>> pool; pool.reserve(2); auto launchRange = [compareFunction](RandomIterator begin, size_t k2, RandomIterator2 temp, unsigned int numThreads) { parallelMergeSort(begin, k2, temp, numThreads, compareFunction); }; pool.emplace_back(internal::async( [=]() { launchRange(a, size / 2, temp, numThreads / 2); })); pool.emplace_back(internal::async([=]() { launchRange(a + size / 2, size - size / 2, temp + size / 2, numThreads - numThreads / 2); })); // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } merge(a, size, temp, compareFunction); } } } // namespace internal template <typename RandomIterator, typename T> void parallelFill(const RandomIterator& begin, const RandomIterator& end, const T& value, ExecutionPolicy policy) { auto diff = end - begin; if (diff <= 0) { return; } size_t size = static_cast<size_t>(diff); parallelFor(kZeroSize, size, [begin, value](size_t i) { begin[i] = value; }, policy); } // Adopted from http://ideone.com/Z7zldb template <typename IndexType, typename Function> void 
parallelFor(IndexType start, IndexType end, const Function& func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_for(start, end, func); } else { for (auto i = start; i < end; ++i) { func(i); } } #elif JET_TASKING_CPP11THREADS // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // [Helper] Inner loop auto launchRange = [&func](IndexType k1, IndexType k2) { for (IndexType k = k1; k < k2; k++) { func(k); } }; // Create pool and launch jobs std::vector<std::thread> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(launchRange, i1, i2); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(launchRange, i1, end); } // Wait for jobs to finish for (std::thread& t : pool) { if (t.joinable()) { t.join(); } } #else #ifdef JET_TASKING_OPENMP if (policy == ExecutionPolicy::kParallel) { #pragma omp parallel for #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) for (ssize_t i = start; i < ssize_t(end); ++i) { #else // !MSVC || Intel for (auto i = start; i < end; ++i) { #endif // MSVC && !Intel func(i); } } else { for (auto i = start; i < end; ++i) { func(i); } } #else // JET_TASKING_OPENMP for (auto i = start; i < end; ++i) { func(i); } #endif // JET_TASKING_OPENMP #endif } template <typename IndexType, typename Function> void parallelRangeFor(IndexType start, IndexType end, const Function& func, ExecutionPolicy policy) { if (start > end) { return; } #ifdef JET_TASKING_TBB if (policy == 
ExecutionPolicy::kParallel) { tbb::parallel_for(tbb::blocked_range<IndexType>(start, end), [&func](const tbb::blocked_range<IndexType>& range) { func(range.begin(), range.end()); }); } else { func(start, end); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) { pool.emplace_back(internal::async([=]() { func(i1, i2); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back(internal::async([=]() { func(i1, end); })); } // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } #endif } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function& function, ExecutionPolicy policy) { parallelFor(beginIndexY, endIndexY, [&](IndexType j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j); } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, const Function& function, ExecutionPolicy policy) { parallelRangeFor(beginIndexY, endIndexY, [&](IndexType jBegin, IndexType jEnd) { function(beginIndexX, endIndexX, jBegin, jEnd); }, policy); } template <typename IndexType, typename Function> void parallelFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType 
beginIndexZ, IndexType endIndexZ, const Function& function, ExecutionPolicy policy) { parallelFor(beginIndexZ, endIndexZ, [&](IndexType k) { for (IndexType j = beginIndexY; j < endIndexY; ++j) { for (IndexType i = beginIndexX; i < endIndexX; ++i) { function(i, j, k); } } }, policy); } template <typename IndexType, typename Function> void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX, IndexType beginIndexY, IndexType endIndexY, IndexType beginIndexZ, IndexType endIndexZ, const Function& function, ExecutionPolicy policy) { parallelRangeFor(beginIndexZ, endIndexZ, [&](IndexType kBegin, IndexType kEnd) { function(beginIndexX, endIndexX, beginIndexY, endIndexY, kBegin, kEnd); }, policy); } template <typename IndexType, typename Value, typename Function, typename Reduce> Value parallelReduce(IndexType start, IndexType end, const Value& identity, const Function& func, const Reduce& reduce, ExecutionPolicy policy) { if (start > end) { return identity; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { return tbb::parallel_reduce( tbb::blocked_range<IndexType>(start, end), identity, [&func](const tbb::blocked_range<IndexType>& range, const Value& init) { return func(range.begin(), range.end(), init); }, reduce); } else { (void)reduce; return func(start, end, identity); } #else // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; // Size of a slice for the range functions IndexType n = end - start + 1; IndexType slice = (IndexType)std::round(n / static_cast<double>(numThreads)); slice = std::max(slice, IndexType(1)); // Results std::vector<Value> results(numThreads, identity); // [Helper] Inner loop auto launchRange = [&](IndexType k1, IndexType k2, unsigned int tid) { results[tid] = func(k1, k2, identity); }; // Create pool and launch jobs std::vector<std::future<void>> pool; pool.reserve(numThreads); IndexType i1 = start; IndexType i2 = std::min(start + slice, end); unsigned int tid = 0; for (; tid + 1 < numThreads && i1 < end; ++tid) { pool.emplace_back(internal::async([=]() { launchRange(i1, i2, tid); })); i1 = i2; i2 = std::min(i2 + slice, end); } if (i1 < end) { pool.emplace_back( internal::async([=]() { launchRange(i1, end, tid); })); } // Wait for jobs to finish for (auto& f : pool) { if (f.valid()) { f.wait(); } } // Gather Value finalResult = identity; for (const Value& val : results) { finalResult = reduce(val, finalResult); } return finalResult; #endif } template <typename RandomIterator, typename CompareFunction> void parallelSort(RandomIterator begin, RandomIterator end, CompareFunction compareFunction, ExecutionPolicy policy) { if (end < begin) { return; } #ifdef JET_TASKING_TBB if (policy == ExecutionPolicy::kParallel) { tbb::parallel_sort(begin, end, compareFunction); } else { std::sort(begin, end, compareFunction); } #else size_t size = static_cast<size_t>(end - begin); typedef typename std::iterator_traits<RandomIterator>::value_type value_type; std::vector<value_type> temp(size); // Estimate number of threads in the pool unsigned int numThreadsHint = maxNumberOfThreads(); const unsigned int numThreads = (policy == ExecutionPolicy::kParallel) ? (numThreadsHint == 0u ? 
8u : numThreadsHint) : 1; internal::parallelMergeSort(begin, size, temp.begin(), numThreads, compareFunction); #endif } template <typename RandomIterator> void parallelSort(RandomIterator begin, RandomIterator end, ExecutionPolicy policy) { parallelSort( begin, end, std::less<typename std::iterator_traits<RandomIterator>::value_type>(), policy); } #endif /* parallel_inl_h */
rnn_helpers.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #ifdef _WIN32 #pragma warning(disable : 4267) #endif #include <algorithm> #include <functional> #include <future> #include <string> #include <vector> #include "gsl/gsl" #include "core/common/common.h" #include "core/common/logging/logging.h" #include "core/framework/allocator.h" #include "core/util/math.h" #include "core/util/math_cpuonly.h" #include "core/platform/threadpool.h" namespace onnxruntime { class Tensor; class OpKernelContext; namespace rnn { namespace detail { enum Direction { kForward = 0, kReverse = 1, kBidirectional = 2 }; inline Direction MakeDirection(const std::string& direction) { if (direction == "forward") { return kForward; } if (direction == "reverse") { return kReverse; } if (direction == "bidirectional") { return kBidirectional; } ORT_THROW("Invalid 'direction' argument of '", direction, "'. Must be one of 'forward', 'reverse', or 'bidirectional'."); } /** Allocate a unique_ptr using allocator_, and return a span to the allocated memory so usage is safe @param allocator IAllocator to use for the allocation. @param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'. @param unique_ptr unique_ptr that will control the lifetime of the allocated memory. @param fill If true, fill the allocated memory with fill_value. @param fill_value Value to use if 'fill' is true. @returns A span to provide bounds checked access to the allocated memory. 
*/
template <typename TAlloc>
gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator,
                           size_t size,
                           IAllocatorUniquePtr<TAlloc>& unique_ptr,
                           bool fill = false, TAlloc fill_value = TAlloc{}) {
  unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size);
  auto span = gsl::make_span(unique_ptr.get(), size);

  if (fill) {
    // Don't use span.begin() here: it hurts performance and prevents the
    // compiler from optimizing the fill.
    std::fill_n(unique_ptr.get(), size, fill_value);
  }

  return span;
}

// validate the common inputs to RNN, LSTM and GRU operators
Status ValidateCommonRnnInputs(const Tensor& X,
                               const Tensor& W,
                               const Tensor& R,
                               const Tensor* B,
                               int WRB_dim_1_multipler,  // multiplier used with hidden_size for W, R and B inputs
                               const Tensor* sequence_lens,
                               const Tensor* initial_h,
                               int64_t num_directions,
                               int64_t hidden_size);

/// Copy an input array repeatedly to an output array
/// @param input_begin Beginning of input
/// @param input_end End of input
/// @param output Output iterator
/// @param repetitions Number of times to repeat copy. Assumes output is sufficiently sized.
/// @returns Position of output iterator after copy is completed
template <typename TInIter, typename TOutIter>
TOutIter RepeatVectorToConstructArray(TInIter input_begin,
                                      TInIter input_end,
                                      TOutIter output,
                                      int64_t repetitions) {
  for (int64_t i = 0; i < repetitions; i++) {
    output = std::copy(input_begin, input_end, output);
  }

  return output;
}

// reverse an LSTM or GRU sequence which has shape [seq_length, batch_size, hidden_size]
// and output to shape [seq_length, num_directions, batch_size, hidden_size]
// Entries at or beyond each batch entry's sequence_lengths[i] are copied
// through un-reversed (second loop below).
template <typename T>
void ReverseSequence(gsl::span<const T> inputs,
                     gsl::span<T> inputs_reverse,
                     gsl::span<const int> sequence_lengths,
                     const int max_sequence_length,
                     const int batch_size,
                     const int input_size,
                     const int num_directions,
                     concurrency::ThreadPool*) {
  for (int i = 0; i < batch_size; i++) {
    int seq_len = sequence_lengths[i];
#ifdef _OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
    // Reverse the first seq_len timesteps for this batch entry.
    for (int j = 0; j < seq_len; j++) {
      gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
      gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size);

      // Use gsl::copy instead of std::copy() to allow compiler to optimize the code
      gsl::copy(src, dest);
    }

#ifdef _OPENMP
// Parallel execute the loop.
#pragma omp parallel for
#endif
    // Copy any padding timesteps (>= seq_len) straight through, unreversed.
    for (int j = seq_len; j < max_sequence_length; j++) {
      gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size);
      gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size);

      // Use gsl::copy instead of std::copy() to allow compiler to optimize the code
      gsl::copy(src, dest);
    }
  }
}

// A has size M x K, B has size N x K (transposed), and C has size M x N
// We check that A, B and C are large enough before calling the lower level GEMM implementation
template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter>
void ComputeGemm(const int M,
                 const int N,
                 const int K,
                 const float alpha,
                 TSpanAIter A,
                 TSpanAIter A_end,
                 const int lda,
                 TSpanBIter B,
                 TSpanBIter B_end,
                 const int ldb,
                 const float beta,
                 TSpanCIter C,
                 TSpanCIter C_end,
                 const int ldc,
                 concurrency::ThreadPool* tp) {
  // validate all the inputs
  // need to use the lda/ldb/ldc strides which should be >= the columns for the span
  ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N);
  ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end);
  ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end);
  ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end);

  ::onnxruntime::math::GemmEx<float>(
      CblasNoTrans, CblasTrans,
      M, N, K, alpha,
      &*A, lda,
      &*B, ldb, beta,
      &*C, ldc, tp);
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur,
                             typename gsl::span<T>::const_iterator end,
                             size_t size) {
  ORT_ENFORCE(cur + size <= end);
  return &*cur;
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
// NOTE(review): this overload returns span.data() WITHOUT adding 'offset',
// unlike the SafeRawPointer overload below which returns
// span.data() + offset. Verify callers always pass offset == 0 here, or
// confirm the intended semantics before relying on 'offset'.
template <typename T>
const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  return span.data();
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
T* SafeRawPointer(typename gsl::span<T>::iterator cur,
                  typename gsl::span<T>::iterator end,
                  size_t size) {
  ORT_ENFORCE(cur + size <= end);
  return &*cur;
}

// helper to convert a span to a raw pointer
// after validating the memory covered by the span supports the size required
template <typename T>
T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) {
  ORT_ENFORCE(offset + size <= size_t(span.size()));
  return span.data() + offset;
}

// Debug helper: dump a row x col float matrix (declared here, defined in the
// implementation file).
void DumpMatrixImpl(const std::string& name, const float* src, int row, int col,
                    int offset = 0, int col_width = -1);

// Helper class to wrap the processing of the activation funcs and any alpha/beta values.
// The alpha/beta values are consumed in the order of the activation funcs. once they run out
// defaults will be used as needed.
// The Entries property contains the normalized function names and the alpha/beta value to use.
class ActivationFuncs {
 public:
  // One normalized activation function name plus the alpha/beta values
  // consumed for it.
  struct Entry {
    const std::string name;
    const float alpha;
    const float beta;
  };

  ActivationFuncs() = default;

  ActivationFuncs(const std::vector<std::string>& funcs,
                  const std::vector<float>& alphas,
                  const std::vector<float>& betas);

  const std::vector<Entry>& Entries() const { return entries_; }

 private:
  std::vector<Entry> entries_;
};

namespace deepcpu {

// Function-pointer aliases for the low-level activation / gate kernels.
using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int);
using ClipWithBiasFuncPtr = void (*)(float, const float*, float*, const int);
using ActivationFuncPtr = void (*)(float*, int, float, float);
using ActivationFuncBPtr = void (*)(const float*, float*, int, float, float);
using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, int, float, float);
using GruResetGateFuncPtr = void (*)(const float*, float*, float*, int, float, float);
using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, int, float, float);

// Look up a kernel by its normalized activation-function name.
ActivationFuncPtr ActivationFuncByName(const std::string& func);
LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func);
GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func);
GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func);

void add_bias_into_ignore(const float* ignored, const float* pd, int c);
void add_bias_into(const float* ps, float* pd, int c);
void clip(float b, float* pd, int c);
void clip_add_bias(float b, const float* pb, float* pd, int c);
void clip_ignore_bias(float b, const float* pb, float* pd, int c);
void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void relu_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void sigmoid_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void tanh_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta);
void sigmoid(float* pd, int c, float alpha, float beta);
void tanh(float* pd, int c, float alpha, float beta);
void relu(float* pd, int c, float alpha, float beta);
void sigmoid_exact(float* pd, int c, float alpha, float beta);
void tanh_exact(float* pd, int c, float alpha, float beta);
void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf, const float* pg, float* pcurr, int c);
void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta);
void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta);
void gru_reset_gate_relu(const float* ps1, const float* ps2, float* pd, int c, float alpha, float beta);
void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);
void gru_output_gate_relu(const float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta);

// Multiply-accumulate: dest[i] += op1[i] * op2[i].
// (Note the accumulation: dest is added to, not overwritten.)
inline void elementwise_product(const float* op1, const float* op2,
                                float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += op1[i] * op2[i];
}

// Accumulate: dest[i] += src[i].
inline void elementwise_sum1(const float* src, float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += src[i];
}

// Accumulate the sum of two sources: dest[i] += src1[i] + src2[i].
inline void elementwise_sum2(const float* src1, const float* src2,
                             float* dest, int size) {
  for (int i = 0; i < size; i++)
    dest[i] += src1[i] + src2[i];
}

}  // namespace deepcpu
}  // namespace detail
}  // namespace rnn
}  // namespace onnxruntime
/* profile.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/

#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
#  if defined(MAGICKCORE_WINDOWS_SUPPORT)
#    if !defined(__MINGW32__)
#      include <win32config.h>
#    endif
#  endif
#  include <libxml/parser.h>
#  include <libxml/tree.h>
#endif

/*
  Definitions

  LCMSType is the pixel sample type handed to lcms: unsigned short for
  non-HDRI Q8/Q16 builds (with the matching scaling macros), double for
  HDRI builds.
*/
#define LCMSHDRI
#if !defined(MAGICKCORE_HDRI_SUPPORT)
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) ScaleQuantumToShort(pixel)
#define LCMSScaleTarget(pixel) ScaleShortToQuantum(pixel)
typedef unsigned short LCMSType;
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) (pixel)
#define LCMSScaleTarget(pixel) (pixel)
typedef unsigned short LCMSType;
#endif
#endif

#if defined(LCMSHDRI)
#define LCMSScaleSource(pixel) (source_scale*QuantumScale*(pixel))
#define LCMSScaleTarget(pixel) ClampToQuantum(target_scale*QuantumRange*(pixel))
typedef double LCMSType;
#endif

/*
  Forward declarations
*/
static MagickBooleanType
  SetImageProfileInternal(Image *,const char *,const StringInfo *,
    const MagickBooleanType,ExceptionInfo *);

static void
  WriteTo8BimProfile(Image *,const char*,const StringInfo *);

/*
  Typedef declarations
*/
struct _ProfileInfo
{
  char
    *name;

  size_t
    length;

  unsigned char
    *info;

  size_t
    signature;
};

typedef struct _CMSExceptionInfo
{
  Image
    *image;

  ExceptionInfo
    *exception;
} CMSExceptionInfo;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e I m a g e P r o f i l e s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      /* Replace any existing profile map before cloning the source's. */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* Keep the 8BIM meta-profile in sync before removing the named profile. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e P r o f i l e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t N e x t I m a g e P r o f i l e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.
%
%  The format of the GetNextImageProfile method is:
%
%      char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   P r o f i l e I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise added or applied.  Use a name of '*' and a profile
%  of NULL to remove all profiles from the image.
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profile are applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,const MagickBooleanType clone)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o clone: should be MagickFalse.
%
*/

#if defined(MAGICKCORE_LCMS_DELEGATE)

/*
  Compatibility shims for lcms releases older than 2.6: emulate the
  context-aware API on top of the global one, treating the context handle
  itself as the user-data pointer.
*/
#if LCMS_VERSION < 2060
static void* cmsGetContextUserData(cmsContext ContextID)
{
  return(ContextID);
}

static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
  magick_unreferenced(Plugin);
  return((cmsContext) UserData);
}

static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
  cmsLogErrorHandlerFunction Fn)
{
  magick_unreferenced(ContextID);
  cmsSetLogErrorHandler(Fn);
}

static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  magick_unreferenced(ContextID);
}
#endif

/*
  Release the per-thread pixel buffers allocated by AcquirePixelThreadSet().
  Safe to call with a partially populated (or NULL) array.
*/
static LCMSType **DestroyPixelThreadSet(LCMSType **pixels)
{
  register ssize_t
    i;

  if (pixels == (LCMSType **) NULL)
    return((LCMSType **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (LCMSType *) NULL)
      pixels[i]=(LCMSType *) RelinquishMagickMemory(pixels[i]);
  pixels=(LCMSType **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one row buffer (columns*channels samples) per worker thread.
  Returns NULL on allocation failure, after freeing anything already
  allocated.
*/
static LCMSType **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  LCMSType
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(LCMSType **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (LCMSType **) NULL)
    return((LCMSType **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(LCMSType *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[i] == (LCMSType *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Release the per-thread lcms transforms allocated by
  AcquireTransformThreadSet().
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

/*
  Create one lcms color transform per worker thread (cmsHTRANSFORM is not
  thread-safe to share).  Returns NULL on failure, after freeing any
  transforms already created.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags,cmsContext cms_context)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR(cms_context,source_profile,source_type,
      target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}

/*
  lcms error callback: route lcms diagnostics into the ImageMagick exception
  attached to the cms context's user data.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context);
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception ==
(ExceptionInfo *) NULL) return; image=cms_exception->image; if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s'","unknown context"); return; } if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? message : "no message"); (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "UnableToTransformColorspace","`%s', %s (#%u)",image->filename, message != (char *) NULL ? message : "no message",severity); } #endif static MagickBooleanType SetsRGBImageProfile(Image *image, ExceptionInfo *exception) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 
0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 
0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 
0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 
0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 
0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 
0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 
0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 
0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 
0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 
0x6d, 0xff, 0xff
  };  /* end of the embedded sRGB ICC profile byte table */

  StringInfo
    *profile;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* Do not overwrite an ICC profile the image already carries. */
  if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
    return(MagickFalse);
  profile=AcquireStringInfo(sizeof(sRGBProfile));
  SetStringInfoDatum(profile,sRGBProfile);
  status=SetImageProfile(image,"icc",profile,exception);
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
  profile with / from an image.  Passing a NULL datum (or zero length)
  deletes all profiles matching `name'.  For "icc"/"icm" profiles the pixels
  are color-transformed with LittleCMS when it is built in; other profile
  names are simply attached to the image.

    o image: the image.
    o name: the profile name (e.g. "icc", "iptc", "8bim", or "*").
    o datum: the profile data (caller retains ownership; it is copied).
    o length: the byte length of datum.
    o exception: return any errors or warnings in this structure.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag  "Profile/Image"
  /*
    Release any LCMS resources acquired so far, then throw.  Relies on
    cms_context, source_profile and target_profile being in scope.
  */
#define ThrowProfileException(severity,tag,context) \
{ \
  if (cms_context != (cmsContext) NULL) \
    cmsDeleteContext(cms_context); \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s) whose name matches `name' (glob members
        allowed); restart the iterator after each removal since deletion
        invalidates it.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            Incoming profile is identical to the attached one: consult the
            EXIF colorspace hints and, if they do not claim sRGB already,
            attach the built-in sRGB profile instead.
            NOTE(review): GetImageProperty() may return NULL here; this
            presumes LocaleCompare() tolerates a NULL first argument (as the
            MagickCore implementation does) — confirm before reuse elsewhere.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          /*
            Future.
            value=GetImageProperty(image,"exif:InteroperabilityIndex",
              exception);
            if (LocaleCompare(value,"R03.") != 0)
              (void) SetAdobeRGB1998ImageProfile(image,exception);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /* Still identical after the sRGB fix-up: nothing to do. */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        cmsContext
          cms_context;

        CMSExceptionInfo
          cms_exception;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cms_exception.image=image;
        cms_exception.exception=exception;
        cms_context=cmsCreateContext(NULL,&cms_exception);
        if (cms_context == (cmsContext) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler);
        source_profile=cmsOpenProfileFromMemTHR(cms_context,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          {
            cmsDeleteContext(cms_context);
            ThrowBinaryException(ResourceLimitError,
              "ColorspaceColorProfileMismatch",name);
          }
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            int
              intent;

            LCMSType
              **magick_restrict source_pixels,
              **magick_restrict target_pixels;

#if defined(LCMSHDRI)
            LCMSType
              source_scale,
              target_scale;
#endif

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            /*
              If an ICC profile is already attached, it becomes the source
              and the caller's profile the target of the transform.
            */
            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR(cms_context,
                  GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
#if defined(LCMSHDRI)
            source_scale=1.0;
#endif
            /*
              Map the source profile's colorspace to a Magick colorspace,
              channel count, and LCMS pixel-format code.
            */
            source_colorspace=sRGBColorspace;
            source_channels=3;
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_channels=4;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                source_scale=100.0;
#else
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_channels=1;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_Lab_DBL;
                source_scale=100.0;
#else
                source_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                break;
              }
#endif
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigYCbCrData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                break;
              }
#endif
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            (void) source_colorspace;
            /*
              The target colorspace is the source's PCS, unless an explicit
              target profile exists, in which case it is that profile's
              colorspace.
            */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
#if defined(LCMSHDRI)
            target_scale=1.0;
#endif
            target_channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_channels=4;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                target_scale=0.01;
#else
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_channels=1;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_Lab_DBL;
                target_scale=0.01;
#else
                target_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                break;
              }
#endif
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigYCbCrData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                break;
              }
#endif
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /* Map the image's rendering intent onto LCMS intents. */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent:
              {
                intent=INTENT_ABSOLUTE_COLORIMETRIC;
                break;
              }
              case PerceptualIntent:
              {
                intent=INTENT_PERCEPTUAL;
                break;
              }
              case RelativeIntent:
              {
                intent=INTENT_RELATIVE_COLORIMETRIC;
                break;
              }
              case SaturationIntent:
              {
                intent=INTENT_SATURATION;
                break;
              }
              default:
              {
                intent=INTENT_PERCEPTUAL;
                break;
              }
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            /* One transform per worker thread. */
            transform=AcquireTransformThreadSet(source_profile,source_type,
              target_profile,target_type,intent,flags,cms_context);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image
              profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,
              source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,
              target_channels);
            if ((source_pixels == (LCMSType **) NULL) ||
                (target_pixels == (LCMSType **) NULL))
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) ==
                MagickFalse)
              {
                /*
                  NOTE(review): this exit path closes the profiles but does
                  not call cmsDeleteContext(cms_context) nor
                  DestroyStringInfo(profile) — presumably a leak; confirm
                  against upstream before changing.
                */
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register LCMSType
                *p;

              register Quantum
                *magick_restrict q;

              register ssize_t
                x;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              /* Pack one row into this thread's source staging buffer. */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=LCMSScaleSource(GetPixelRed(image,q));
                if (source_channels > 1)
                  {
                    *p++=LCMSScaleSource(GetPixelGreen(image,q));
                    *p++=LCMSScaleSource(GetPixelBlue(image,q));
                  }
                if (source_channels > 3)
                  *p++=LCMSScaleSource(GetPixelBlack(image,q));
                q+=GetPixelChannels(image);
              }
              cmsDoTransform(transform[id],source_pixels[id],
                target_pixels[id],(unsigned int) image->columns);
              /* Unpack the transformed row back into the pixel cache. */
              p=target_pixels[id];
              q-=GetPixelChannels(image)*image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (target_channels == 1)
                  SetPixelGray(image,LCMSScaleTarget(*p),q);
                else
                  SetPixelRed(image,LCMSScaleTarget(*p),q);
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(image,LCMSScaleTarget(*p),q);
                    p++;
                    SetPixelBlue(image,LCMSScaleTarget(*p),q);
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelBlack(image,LCMSScaleTarget(*p),q);
                    p++;
                  }
                q+=GetPixelChannels(image);
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp atomic
#endif
                  progress++;
                  proceed=SetImageProgress(image,ProfileImageTag,progress,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace,exception);
            /* Choose an image type consistent with the target colorspace. */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            /* Device-link profiles transform pixels but are not attached. */
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
        cmsDeleteContext(cms_context);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m o v e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemoveImageProfile() removes a named profile from the image and returns its
%  value.
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* Keep any 8BIM wrapper in sync by deleting the embedded copy too. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.
Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor for profile entries. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from a resource stream; returns the advanced pointer. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Read a big-endian 32-bit value from a resource stream. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/* Read a big-endian 16-bit value from a resource stream. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/* Write a big-endian 32-bit value into a resource stream. */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  WriteTo8BimProfile() replaces (profile != NULL) or removes (profile == NULL)
  the resource with the 8BIM id corresponding to `name' ("icc", "iptc" or
  "xmp") inside the image's attached "8bim" Photoshop resource block, keeping
  the two representations of the profile in sync.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* Only these profile names have an 8BIM resource id. */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  /*
    Walk the 8BIM resource records: 4-byte "8BIM" signature, 2-byte id,
    Pascal name string (padded to even length), then a 4-byte data length.
  */
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* q marks the start of the current record */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
/* (continuation of WriteTo8BimProfile: `value' is the record's data size) */
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* resource data is padded to an even byte count */
    if ((count < 0) || (p > (datum+length-count)) ||
        (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        /*
          Rebuild the 8bim profile with this record either dropped
          (profile == NULL) or with its payload replaced by `profile'.
        */
        extract_extent=0;
        extent=(datum+length)-(p+count);
        if (profile == (StringInfo *) NULL)
          {
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            /* Rewrite the 4-byte length field preceding the payload. */
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}

/*
  GetProfilesFromResourceBlock() scans a Photoshop 8BIM resource block and
  extracts the embedded resolution, IPTC, ICC, EXIF, and XMP resources,
  attaching each as a first-class image profile / property.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    /* Bounds-check before touching the payload. */
    if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
        (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution: 32-bit fixed-point (16.16) pixels-per-inch values.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* skip the pad byte after odd-length payloads */
  }
}

#if defined(MAGICKCORE_XML_DELEGATE)
/* Return MagickTrue when `profile' parses as well-formed XML (XMP). */
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
*/ document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int) GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR | XML_PARSE_NOWARNING); if (document == (xmlDocPtr) NULL) return(MagickFalse); xmlFreeDoc(document); return(MagickTrue); } #else static MagickBooleanType ValidateXMPProfile(const StringInfo *profile) { return(MagickFalse); } #endif static MagickBooleanType SetImageProfileInternal(Image *image,const char *name, const StringInfo *profile,const MagickBooleanType recursive, ExceptionInfo *exception) { char key[MagickPathExtent], property[MagickPathExtent]; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((LocaleCompare(name,"xmp") == 0) && (ValidateXMPProfile(profile) == MagickFalse)) { (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning, "CorruptImageProfile","`%s'",name); return(MagickTrue); } if (image->profiles == (SplayTreeInfo *) NULL) image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, DestroyProfile); (void) CopyMagickString(key,name,MagickPathExtent); LocaleLower(key); status=AddValueToSplayTree((SplayTreeInfo *) image->profiles, ConstantString(key),CloneStringInfo(profile)); if (status != MagickFalse) { if (LocaleCompare(name,"8bim") == 0) GetProfilesFromResourceBlock(image,profile,exception); else if (recursive == MagickFalse) WriteTo8BimProfile(image,name,profile); } /* Inject profile into image properties. 
*/ (void) FormatLocaleString(property,MagickPathExtent,"%s:*",name); (void) GetImageProperty(image,property,exception); return(status); } MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name, const StringInfo *profile,ExceptionInfo *exception) { return(SetImageProfileInternal(image,name,profile,MagickFalse,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P r o f i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageProfiles() synchronizes image properties with the image profiles. % Currently we only support updating the EXIF resolution and orientation. % % The format of the SyncImageProfiles method is: % % MagickBooleanType SyncImageProfiles(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static inline int ReadProfileByte(unsigned char **p,size_t *length) { int c; if (*length < 1) return(EOF); c=(int) (*(*p)++); (*length)--; return(c); } static inline signed short ReadProfileShort(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned short value; if (endian == LSBEndian) { value=(unsigned short) buffer[1] << 8; value|=(unsigned short) buffer[0]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } value=(unsigned short) buffer[0] << 8; value|=(unsigned short) buffer[1]; quantum.unsigned_value=value & 0xffff; return(quantum.signed_value); } static inline signed int ReadProfileLong(const EndianType endian, unsigned char *buffer) { union { unsigned int unsigned_value; signed int signed_value; } quantum; unsigned int value; if (endian == LSBEndian) { value=(unsigned int) buffer[3] << 24; value|=(unsigned int) buffer[2] << 16; value|=(unsigned int) buffer[1] << 8; value|=(unsigned int) buffer[0]; quantum.unsigned_value=value & 0xffffffff; return(quantum.signed_value); } 
/* MSB (big-endian) tail of ReadProfileLong(): assemble the value high byte
   first, then round-trip through the union to get a signed result without
   implementation-defined conversion. */
value=(unsigned int) buffer[0] << 24;
value|=(unsigned int) buffer[1] << 16;
value|=(unsigned int) buffer[2] << 8;
value|=(unsigned int) buffer[3];
quantum.unsigned_value=value & 0xffffffff;
return(quantum.signed_value);
}

/*
  Read a big-endian 32-bit value at *p, advancing the cursor and decrementing
  the remaining byte count.  Returns 0 when fewer than 4 bytes remain.
*/
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  (*length)-=4;
  *p+=4;
  return(value);
}

/*
  Read a big-endian 16-bit value at *p, advancing the cursor and decrementing
  the remaining byte count.  Returns 0 when fewer than 2 bytes remain.
*/
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  (*length)-=2;
  *p+=2;
  return(value);
}

/*
  Store a 32-bit value at p in the requested byte order.  The caller must
  guarantee at least 4 writable bytes at p.
*/
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) memcpy(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) memcpy(p,buffer,4);
}

/*
  Store a 16-bit value at p in the requested byte order.  The caller must
  guarantee at least 2 writable bytes at p.
*/
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) memcpy(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) memcpy(p,buffer,2);
}

/*
  Rewrite the resolution fields of an embedded Photoshop 8BIM profile so they
  agree with the image's current resolution and units.  Scans the profile
  datum for "8BIM" resource markers and patches resource id 0x3ED
  (ResolutionInfo) in place.  Returns MagickFalse when the datum is truncated
  or a count field is inconsistent.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* Resynchronize on the "8BIM" signature one byte at a time. */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
return(MagickFalse); id=ReadProfileMSBShort(&p,&length); count=(ssize_t) ReadProfileByte(&p,&length); if ((count >= (ssize_t) length) || (count < 0)) return(MagickFalse); p+=count; length-=count; if ((*p & 0x01) == 0) (void) ReadProfileByte(&p,&length); count=(ssize_t) ReadProfileMSBLong(&p,&length); if ((count > (ssize_t) length) || (count < 0)) return(MagickFalse); if ((id == 0x3ED) && (count == 16)) { if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54* 65536.0),p); else WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x* 65536.0),p); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4); if (image->units == PixelsPerCentimeterResolution) WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54* 65536.0),p+8); else WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y* 65536.0),p+8); WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12); } p+=count; length-=count; } return(MagickTrue); } MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile) { #define MaxDirectoryStack 16 #define EXIF_DELIMITER "\n" #define EXIF_NUM_FORMATS 12 #define TAG_EXIF_OFFSET 0x8769 #define TAG_INTEROP_OFFSET 0xa005 typedef struct _DirectoryInfo { unsigned char *directory; size_t entry; } DirectoryInfo; DirectoryInfo directory_stack[MaxDirectoryStack]; EndianType endian; size_t entry, length, number_entries; SplayTreeInfo *exif_resources; ssize_t id, level, offset; static int format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8}; unsigned char *directory, *exif; /* Set EXIF resolution tag. 
*/ length=GetStringInfoLength(profile); exif=GetStringInfoDatum(profile); if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); if ((id != 0x4949) && (id != 0x4D4D)) { while (length != 0) { if (ReadProfileByte(&exif,&length) != 0x45) continue; if (ReadProfileByte(&exif,&length) != 0x78) continue; if (ReadProfileByte(&exif,&length) != 0x69) continue; if (ReadProfileByte(&exif,&length) != 0x66) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; if (ReadProfileByte(&exif,&length) != 0x00) continue; break; } if (length < 16) return(MagickFalse); id=(ssize_t) ReadProfileShort(LSBEndian,exif); } endian=LSBEndian; if (id == 0x4949) endian=LSBEndian; else if (id == 0x4D4D) endian=MSBEndian; else return(MagickFalse); if (ReadProfileShort(endian,exif+2) != 0x002a) return(MagickFalse); /* This the offset to the first IFD. */ offset=(ssize_t) ReadProfileLong(endian,exif+4); if ((offset < 0) || ((size_t) offset >= length)) return(MagickFalse); directory=exif+offset; level=0; entry=0; exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL, (void *(*)(void *)) NULL,(void *(*)(void *)) NULL); do { if (level > 0) { level--; directory=directory_stack[level].directory; entry=directory_stack[level].entry; } if ((directory < exif) || (directory > (exif+length-2))) break; /* Determine how many entries there are in the current IFD. 
*/ number_entries=ReadProfileShort(endian,directory); for ( ; entry < number_entries; entry++) { int components; register unsigned char *p, *q; size_t number_bytes; ssize_t format, tag_value; q=(unsigned char *) (directory+2+(12*entry)); if (q > (exif+length-12)) break; /* corrupt EXIF */ if (GetValueFromSplayTree(exif_resources,q) == q) break; (void) AddValueToSplayTree(exif_resources,q,q); tag_value=(ssize_t) ReadProfileShort(endian,q); format=(ssize_t) ReadProfileShort(endian,q+2); if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS)) break; components=(int) ReadProfileLong(endian,q+4); if (components < 0) break; /* corrupt EXIF */ number_bytes=(size_t) components*format_bytes[format]; if ((ssize_t) number_bytes < components) break; /* prevent overflow */ if (number_bytes <= 4) p=q+8; else { /* The directory entry contains an offset. */ offset=(ssize_t) ReadProfileLong(endian,q+8); if ((offset < 0) || ((size_t) (offset+number_bytes) > length)) continue; if (~length < number_bytes) continue; /* prevent overflow */ p=(unsigned char *) (exif+offset); } switch (tag_value) { case 0x011a: { (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x011b: { (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p); if (number_bytes == 8) (void) WriteProfileLong(endian,1UL,p+4); break; } case 0x0112: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) image->orientation,p); break; } (void) WriteProfileShort(endian,(unsigned short) image->orientation, p); break; } case 0x0128: { if (number_bytes == 4) { (void) WriteProfileLong(endian,(size_t) (image->units+1),p); break; } (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p); break; } default: break; } if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET)) { offset=(ssize_t) ReadProfileLong(endian,p); if (((size_t) offset < length) && (level < (MaxDirectoryStack-2))) { 
/*
  Push the current IFD so the scan can resume after this entry, then
  descend into the sub-IFD referenced by the EXIF/Interop offset tag.
*/
directory_stack[level].directory=directory;
            entry++;
            directory_stack[level].entry=entry;
            level++;
            directory_stack[level].directory=exif+offset;
            directory_stack[level].entry=0;
            level++;
            /*
              Also queue the next chained IFD; its offset is stored
              immediately after the entry table of the current IFD.
            */
            if ((directory+2+(12*number_entries)) > (exif+length))
              break;
            offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
              number_entries));
            if ((offset != 0) && ((size_t) offset < length) &&
                (level < (MaxDirectoryStack-2)))
              {
                directory_stack[level].directory=exif+offset;
                directory_stack[level].entry=0;
                level++;
              }
          }
        break;
      }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

/*
  SyncImageProfiles() pushes the image's current resolution, units, and
  orientation into any embedded 8BIM and EXIF profiles so the stored
  metadata matches the in-memory image.  Returns MagickFalse if either
  profile could not be synchronized.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile != (StringInfo *) NULL)
    if (SyncExifProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  return(status);
}
1140.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp parallel for num_threads(2) for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp parallel for num_threads(2) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
scan.h
#pragma once  // added: header had no include guard

#include <omp.h>
#include <algorithm>  // added: std::min was used without its header
#include <cstddef>
#include <vector>

// Parallel exclusive prefix sum of `in`, written to `prefix`.
// `prefix` must have room for in.size() + 1 elements; prefix[0] == 0 and
// prefix[in.size()] holds the total.  Three phases: per-block partial sums
// (parallel), a serial scan over the block sums, then a parallel per-block
// local scan seeded with each block's offset.
template <typename InTy = unsigned, typename OutTy = unsigned>
inline void parallel_prefix_sum(const std::vector<InTy>& in, OutTy *prefix) {
  const size_t block_size = 1 << 20;
  const size_t num_blocks = (in.size() + block_size - 1) / block_size;
  std::vector<OutTy> local_sums(num_blocks);
  // Phase 1: sum each block independently.
  #pragma omp parallel for
  for (size_t block = 0; block < num_blocks; block ++) {
    OutTy lsum = 0;
    size_t block_end = std::min((block + 1) * block_size, in.size());
    for (size_t i = block * block_size; i < block_end; i++)
      lsum += in[i];
    local_sums[block] = lsum;
  }
  // Phase 2: serial exclusive scan over the (few) block sums.
  std::vector<OutTy> bulk_prefix(num_blocks + 1);
  OutTy total = 0;
  for (size_t block = 0; block < num_blocks; block++) {
    bulk_prefix[block] = total;
    total += local_sums[block];
  }
  bulk_prefix[num_blocks] = total;
  // Phase 3: local exclusive scan inside each block, offset by its start.
  #pragma omp parallel for
  for (size_t block = 0; block < num_blocks; block ++) {
    OutTy local_total = bulk_prefix[block];
    size_t block_end = std::min((block + 1) * block_size, in.size());
    for (size_t i = block * block_size; i < block_end; i++) {
      prefix[i] = local_total;
      local_total += in[i];
    }
  }
  prefix[in.size()] = bulk_prefix[num_blocks];
}

// Serial exclusive prefix sum; same contract as parallel_prefix_sum
// (`prefix` must hold in.size() + 1 elements).
template <typename InTy = unsigned, typename OutTy = unsigned>
inline void prefix_sum(const std::vector<InTy>& in, OutTy *prefix) {
  OutTy total = 0;
  for (size_t n = 0; n < in.size(); n++) {
    prefix[n] = total;
    total += (OutTy)in[n];
  }
  prefix[in.size()] = total;
}
GB_unaryop__identity_fp32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp32_fp64
// op(A') function:  GB_tran__identity_fp32_fp64

// C type:   float
// A type:   double
// cast:     float cij = (float) aij
// unaryop:  cij = aij

// A's entry type (the input)
#define GB_ATYPE \
    double

// C's entry type (the output)
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

// address of the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_fp32_fp64
(
    float *Cx,          // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent, so a static schedule balances the work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_fp32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is supplied by the macros above via the included template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
c3_fmt.c
/* * Generic crypt(3) support, as well as support for glibc's crypt_r(3) and * Solaris' MT-safe crypt(3C) with OpenMP parallelization. * * This file is part of John the Ripper password cracker, * Copyright (c) 2009-2015 by Solar Designer * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. */ #if AC_BUILT #include "autoconfig.h" #endif #if HAVE_CRYPT #undef _XOPEN_SOURCE #undef _XOPEN_SOURCE_EXTENDED #undef _XOPEN_VERSION #undef _XPG4_2 #undef _GNU_SOURCE #define _XOPEN_SOURCE 4 /* for crypt(3) */ #define _XOPEN_SOURCE_EXTENDED 1 /* for OpenBSD */ #define _XOPEN_VERSION 4 #define _XPG4_2 #define _GNU_SOURCE 1 /* for crypt_r(3) */ #include <stdio.h> #if !AC_BUILT #include <string.h> #ifndef _MSC_VER #include <strings.h> #endif #ifdef __CYGWIN__ #include <crypt.h> #endif #if defined(_OPENMP) && defined(__GLIBC__) #include <crypt.h> #else #if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER #include <unistd.h> #endif #endif #endif #if STRING_WITH_STRINGS #include <string.h> #include <strings.h> #elif HAVE_STRING_H #include <string.h> #elif HAVE_STRINGS_H #include <strings.h> #endif #if (!AC_BUILT && defined(HAVE_CRYPT)) #undef HAVE_CRYPT_H #define HAVE_CRYPT_H 1 #endif #if HAVE_CRYPT_H #include <crypt.h> #endif #if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER #include <unistd.h> #endif #if defined(_OPENMP) #include <omp.h> /* for omp_get_thread_num() */ #endif #include "options.h" #include "arch.h" #include "misc.h" #include "params.h" #include "memory.h" #include "common.h" #include "formats.h" #include "loader.h" #include "john.h" #ifdef HAVE_MPI #include "john-mpi.h" #endif #include "memdbg.h" #define FORMAT_LABEL "crypt" #define FORMAT_NAME "generic crypt(3)" #define ALGORITHM_NAME "?/" ARCH_BITS_STR #define BENCHMARK_COMMENT " DES" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 72 #define BINARY_SIZE 128 #define BINARY_ALIGN 1 #define SALT_SIZE BINARY_SIZE #define 
SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 96 #define MAX_KEYS_PER_CRYPT 96 static struct fmt_tests tests[] = { {"CCNf8Sbh3HDfQ", "U*U*U*U*"}, {"CCX.K.MFy4Ois", "U*U***U"}, {"CC4rMpbg9AMZ.", "U*U***U*"}, {"XXxzOu6maQKqQ", "*U*U*U*U"}, {"SDbsugeBiC58A", ""}, {NULL} }; static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1]; static char saved_salt[SALT_SIZE]; static char crypt_out[MAX_KEYS_PER_CRYPT][BINARY_SIZE]; #if defined(_OPENMP) && defined(__GLIBC__) #define MAX_THREADS MAX_KEYS_PER_CRYPT /* We assume that this is zero-initialized (all NULL pointers) */ static struct crypt_data *crypt_data[MAX_THREADS]; #endif static void init(struct fmt_main *self) { if (options.subformat) { int i; char *salt = tests[0].ciphertext; #if defined(_OPENMP) && defined(__GLIBC__) struct crypt_data data; data.initialized = 0; #endif /* * Allow * ./john --list=format-tests --format=crypt --subformat=md5crypt * in addition to * ./john --test --format=crypt --subformat=md5crypt * * That's why, don't require FLG_TEST_CHK to be set. 
*/ if (options.flags & FLG_PASSWD) { fprintf(stderr, "\n%s: --subformat option is only for --test or --list=format-tests\n", FORMAT_LABEL); error(); } if (!strcmp(options.subformat, "?")) { fprintf(stderr, "Subformat may either be a verbatim salt, or: descrypt, md5crypt, bcrypt, sha256crypt, sha512crypt, sun-md5\n\n"); error(); } else if (!strcasecmp(options.subformat, "md5crypt") || !strcasecmp(options.subformat, "md5")) { static struct fmt_tests tests[] = { {"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"}, {"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"}, {"$1$$qRPK7m23GJusamGpoGLby/", ""}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " MD5"; salt = "$1$dXc3I7Rw$"; } else if (!strcasecmp(options.subformat, "sunmd5") || !strcasecmp(options.subformat, "sun-md5")) { static struct fmt_tests tests[] = { {"$md5$rounds=904$Vc3VgyFx44iS8.Yu$Scf90iLWN6O6mT9TA06NK/", "test"}, {"$md5$rounds=904$ZZZig8GS.S0pRNhc$dw5NMYJoxLlnFq4E.phLy.", "Don41dL33"}, {"$md5$rounds=904$zSuVTn567UJLv14u$q2n2ZBFwKg2tElFBIzUq/0", "J4ck!3Wood"}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " SunMD5"; salt = "$md5$rounds=904$Vc3VgyFx44iS8.Yu$dummy"; } else if ((!strcasecmp(options.subformat, "sha256crypt")) || (!strcasecmp(options.subformat, "sha-256")) || (!strcasecmp(options.subformat, "sha256"))) { static struct fmt_tests tests[] = { {"$5$LKO/Ute40T3FNF95$U0prpBQd4PloSGU0pnpM4z9wKn4vZ1.jsrzQfPqxph9", "U*U*U*U*"}, {"$5$LKO/Ute40T3FNF95$fdgfoJEBoMajNxCv3Ru9LyQ0xZgv0OBMQoq80LQ/Qd.", "U*U***U"}, {"$5$LKO/Ute40T3FNF95$8Ry82xGnnPI/6HtFYnvPBTYgOL23sdMXn8C29aO.x/A", "U*U***U*"}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " SHA-256 rounds=5000"; salt = "$5$LKO/Ute40T3FNF95$"; } else if ((!strcasecmp(options.subformat, "sha512crypt")) || (!strcasecmp(options.subformat, "sha-512")) || (!strcasecmp(options.subformat, "sha512"))) { static struct fmt_tests tests[] = { 
{"$6$LKO/Ute40T3FNF95$6S/6T2YuOIHY0N3XpLKABJ3soYcXD9mB7uVbtEZDj/LNscVhZoZ9DEH.sBciDrMsHOWOoASbNLTypH/5X26gN0", "U*U*U*U*"}, {"$6$LKO/Ute40T3FNF95$wK80cNqkiAUzFuVGxW6eFe8J.fSVI65MD5yEm8EjYMaJuDrhwe5XXpHDJpwF/kY.afsUs1LlgQAaOapVNbggZ1", "U*U***U"}, {"$6$LKO/Ute40T3FNF95$YS81pp1uhOHTgKLhSMtQCr2cDiUiN03Ud3gyD4ameviK1Zqz.w3oXsMgO6LrqmIEcG3hiqaUqHi/WEE2zrZqa/", "U*U***U*"}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " SHA-512 rounds=5000"; salt = "$6$LKO/Ute40T3FNF95$"; } else if ((!strcasecmp(options.subformat, "bf")) || (!strcasecmp(options.subformat, "blowfish")) || (!strcasecmp(options.subformat, "bcrypt"))) { static struct fmt_tests tests[] = { {"$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW","U*U"}, {"$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK","U*U*"}, {"$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a","U*U*U"}, {NULL} }; self->params.tests = tests; self->params.benchmark_comment = " BF x32"; salt = "$2a$05$AD6y0uWY62Xk2TXZ"; } else if (!strcasecmp(options.subformat, "descrypt") || !strcasecmp(options.subformat, "des")) { salt = "CC"; } else { char *p = mem_alloc_tiny(strlen(options.subformat) + 2, MEM_ALIGN_NONE); strcpy(p, " "); strcat(p, options.subformat); self->params.benchmark_comment = p; salt = options.subformat; /* turn off many salts test, since we are not updating the */ /* params.tests structure data. 
*/ self->params.benchmark_length = -1; } for (i = 0; i < 5; i++) { char *c; #if defined(_OPENMP) && defined(__GLIBC__) c = crypt_r(tests[i].plaintext, salt, &data); #else c = crypt(tests[i].plaintext, salt); #endif if (c && strlen(c) >= 7) tests[i].ciphertext = strdup(c); else { fprintf(stderr, "%s not supported on this system\n", options.subformat); error(); } } if (strlen(tests[0].ciphertext) == 13 && strcasecmp(options.subformat, "descrypt") && strcasecmp(options.subformat, "des")) { fprintf(stderr, "%s not supported on this system\n", options.subformat); error(); } } } static int valid(char *ciphertext, struct fmt_main *self) { int length, count_base64, count_base64_2, id, pw_length; char pw[PLAINTEXT_LENGTH + 1], *new_ciphertext; /* We assume that these are zero-initialized */ static char sup_length[BINARY_SIZE], sup_id[0x80]; length = count_base64 = count_base64_2 = 0; while (ciphertext[length]) { if (atoi64[ARCH_INDEX(ciphertext[length])] != 0x7F) { count_base64++; if (length >= 2) count_base64_2++; } length++; } if (length < 13 || length >= BINARY_SIZE) return 0; id = 0; if (length == 13 && count_base64 == 13) /* valid salt */ id = 1; else if (length == 13 && count_base64_2 == 11) /* invalid salt */ id = 2; else if (length >= 13 && count_base64_2 >= length - 2 && /* allow for invalid salt */ (length - 2) % 11 == 0) id = 3; else if (length == 20 && count_base64 == 19 && ciphertext[0] == '_') id = 4; else if (ciphertext[0] == '$') { id = (unsigned char)ciphertext[1]; if (id <= 0x20 || id >= 0x80) id = 9; } else if (ciphertext[0] == '*' || ciphertext[0] == '!') /* likely locked */ id = 10; /* Previously detected as supported */ if (sup_length[length] > 0 && sup_id[id] > 0) return 1; /* Previously detected as unsupported */ if (sup_length[length] < 0 && sup_id[id] < 0) return 0; pw_length = ((length - 2) / 11) << 3; if (pw_length >= sizeof(pw)) pw_length = sizeof(pw) - 1; memcpy(pw, ciphertext, pw_length); /* reuse the string, why not? 
*/ pw[pw_length] = 0; #if defined(_OPENMP) && defined(__GLIBC__) /* * Let's use crypt_r(3) just like we will in crypt_all() below. * It is possible that crypt(3) and crypt_r(3) differ in their supported hash * types on a given system. */ { struct crypt_data **data = &crypt_data[0]; if (!*data) { /* * **data is not exactly tiny, but we use mem_alloc_tiny() for its alignment * support and error checking. We do not need to free() this memory anyway. * * The page alignment is to keep different threads' data on different pages. */ *data = mem_alloc_tiny(sizeof(**data), MEM_ALIGN_PAGE); memset(*data, 0, sizeof(**data)); } new_ciphertext = crypt_r(pw, ciphertext, *data); } #else new_ciphertext = crypt(pw, ciphertext); #endif if (new_ciphertext && strlen(new_ciphertext) == length && !strncmp(new_ciphertext, ciphertext, 2)) { sup_length[length] = 1; sup_id[id] = 1; return 1; } if (id != 10 && !ldr_in_pot) if (john_main_process) fprintf(stderr, "Warning: " "hash encoding string length %d, type id %c%c\n" "appears to be unsupported on this system; " "will not load such hashes.\n", length, id > 0x20 ? '$' : '#', id > 0x20 ? 
id : '0' + id);
	if (!sup_length[length]) sup_length[length] = -1;
	if (!sup_id[id]) sup_id[id] = -1;
	return 0;
}

/*
 * For this format the "binary" is the full ciphertext string itself,
 * copied into a fixed-size, NUL-padded buffer.
 */
static void *binary(char *ciphertext)
{
	static char out[BINARY_SIZE];
	strncpy(out, ciphertext, sizeof(out)); /* NUL padding is required */
	return out;
}

/*
 * Extract the salt portion of an encoding into a fixed NUL-padded buffer.
 * "cut" is how many leading bytes of the ciphertext form the salt; known
 * per-hash-type encoding lengths select specific cut points below.
 */
static void *salt(char *ciphertext)
{
	static char out[SALT_SIZE];
	int cut = sizeof(out);

#if 1
	/* This piece is optional, but matching salts are not detected without it */
	int length = strlen(ciphertext);

	switch (length) {
	case 13:
	case 24:
		cut = 2;
		break;

	case 20:
		if (ciphertext[0] == '_')
			cut = 9;
		break;

	case 35:
	case 46:
	case 57:
		if (ciphertext[0] != '$')
			cut = 2;
		/* fall through */

	default:
		if ((length >= 26 && length <= 34 &&
		    !strncmp(ciphertext, "$1$", 3)) ||
		    (length >= 47 && !strncmp(ciphertext, "$5$", 3)) ||
		    (length >= 90 && !strncmp(ciphertext, "$6$", 3))) {
			char *p = strrchr(ciphertext + 3, '$');
			if (p)
				cut = p - ciphertext;
		} else if (length == 59 && !strncmp(ciphertext, "$2$", 3))
			cut = 28;
		else if (length == 60 &&
		    (!strncmp(ciphertext, "$2a$", 4) ||
		    !strncmp(ciphertext, "$2b$", 4) ||
		    !strncmp(ciphertext, "$2x$", 4) ||
		    !strncmp(ciphertext, "$2y$", 4)))
			cut = 29;
		else if (length >= 27 &&
		    (!strncmp(ciphertext, "$md5$", 5) ||
		    !strncmp(ciphertext, "$md5,", 5))) {
			char *p = strrchr(ciphertext + 4, '$');
			if (p) {
				/* NUL padding is required */
				memset(out, 0, sizeof(out));
				memcpy(out, ciphertext, ++p - ciphertext);
/*
 * Workaround what looks like a bug in sunmd5.c: crypt_genhash_impl() where it
 * takes a different substring as salt depending on whether the optional
 * existing hash encoding is present after the salt or not.  Specifically, the
 * last '$' delimiter is included into the salt when there's no existing hash
 * encoding after it, but is omitted from the salt otherwise.
 */
				out[p - ciphertext] = 'x';
				return out;
			}
		}
	}
#endif

	/* NUL padding is required */
	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, cut);

	return out;
}

/*
 * Partial-hash helpers: mix the base-64 digit values of the encoding's
 * trailing characters into small bucket indices, masked by PH_MASK_*.
 */
#define H(s, i) \
	((int)(unsigned char)(atoi64[ARCH_INDEX((s)[(i)])] ^ (s)[(i) - 1]))

#define H0(s) \
	int i = strlen(s) - 2; \
	return i > 0 ? H((s), i) & PH_MASK_0 : 0
#define H1(s) \
	int i = strlen(s) - 2; \
	return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 4)) & PH_MASK_1 : 0
#define H2(s) \
	int i = strlen(s) - 2; \
	return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 6)) & PH_MASK_2 : 0
#define H3(s) \
	int i = strlen(s) - 2; \
	return i > 4 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
	    (H((s), i - 4) << 10)) & PH_MASK_3 : 0
#define H4(s) \
	int i = strlen(s) - 2; \
	return i > 6 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
	    (H((s), i - 4) << 10) ^ (H((s), i - 6) << 15)) & PH_MASK_4 : 0

static int binary_hash_0(void *binary)
{
	H0((char *)binary);
}

static int binary_hash_1(void *binary)
{
	H1((char *)binary);
}

static int binary_hash_2(void *binary)
{
	H2((char *)binary);
}

static int binary_hash_3(void *binary)
{
	H3((char *)binary);
}

static int binary_hash_4(void *binary)
{
	H4((char *)binary);
}

static int get_hash_0(int index)
{
	H0(crypt_out[index]);
}

static int get_hash_1(int index)
{
	H1(crypt_out[index]);
}

static int get_hash_2(int index)
{
	H2(crypt_out[index]);
}

static int get_hash_3(int index)
{
	H3(crypt_out[index]);
}

static int get_hash_4(int index)
{
	H4(crypt_out[index]);
}

/* Hash the last two characters of the salt string into a bucket index. */
static int salt_hash(void *salt)
{
	int i, h;

	i = strlen((char *)salt) - 1;
	if (i > 1) i--;

	h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])];
	h ^= ((unsigned char *)salt)[i - 1];
	h <<= 6;
	h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i - 1])];
	h ^= ((unsigned char *)salt)[i];

	return h & (SALT_HASH_SIZE - 1);
}

static void set_salt(void *salt)
{
	strcpy(saved_salt, salt);
}

static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

/*
 * Compute crypt(3) of every queued key against the current salt.
 * On glibc, uses the reentrant crypt_r() with one lazily-allocated
 * crypt_data per OpenMP thread (the structs are deliberately staggered
 * in memory to reduce cache-line contention); otherwise falls back to
 * plain crypt(), optionally parallel on Solaris.  A NULL result is
 * warned about once and treated as an empty hash.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	static int warned = 0;
	int count = *pcount;
	int index;

#if defined(_OPENMP) && defined(__GLIBC__)
#pragma omp parallel for default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, crypt_data, stderr)
	for (index = 0; index < count; index++) {
		char *hash;
		int t = omp_get_thread_num();
		if (t < MAX_THREADS) {
			struct crypt_data **data = &crypt_data[t];
			if (!*data) {
/* Stagger the structs to reduce their competition for the same cache lines */
				size_t mask = MEM_ALIGN_PAGE, shift = 0;
				while (t) {
					mask >>= 1;
					if (mask < MEM_ALIGN_CACHE)
						break;
					if (t & 1)
						shift += mask;
					t >>= 1;
				}
				*data = (void *)((char *)
				    mem_alloc_tiny(sizeof(**data) + shift,
				    MEM_ALIGN_PAGE) + shift);
				memset(*data, 0, sizeof(**data));
			}
			hash = crypt_r(saved_key[index], saved_salt, *data);
		} else { /* should not happen */
			struct crypt_data data;
			memset(&data, 0, sizeof(data));
			hash = crypt_r(saved_key[index], saved_salt, &data);
		}
		if (!hash) {
#pragma omp critical
			if (!warned) {
				fprintf(stderr,
				    "Warning: crypt_r() returned NULL\n");
				warned = 1;
			}
			hash = "";
		}
		strnzcpy(crypt_out[index], hash, BINARY_SIZE);
	}
#else
#if defined(_OPENMP) && defined(__sun)
/*
 * crypt(3C) is MT-safe on Solaris.  For traditional DES-based hashes, this is
 * implemented with locking (hence there's no speedup from the use of multiple
 * threads, and the per-thread performance is extremely poor anyway).  For
 * modern hash types, the function is actually able to compute multiple hashes
 * in parallel by different threads (and the performance for some hash types is
 * reasonable).  Overall, this code is reasonable to use for SHA-crypt and
 * SunMD5 hashes, which are not yet supported by non-jumbo John natively.
 */
#pragma omp parallel for /* default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, stderr) or __iob */
#endif
	for (index = 0; index < count; index++) {
		char *hash = crypt(saved_key[index], saved_salt);
		if (!hash) {
#if defined(_OPENMP) && defined(__sun)
#pragma omp critical
#endif
			if (!warned) {
				fprintf(stderr,
				    "Warning: crypt() returned NULL\n");
				warned = 1;
			}
			hash = "";
		}
		strnzcpy(crypt_out[index], hash, BINARY_SIZE);
	}
#endif

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!strcmp((char *)binary, crypt_out[index]))
			return 1;

	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !strcmp((char *)binary, crypt_out[index]);
}

/* cmp_one() already compares the complete hash strings, so there is
   nothing further to verify here. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/*
 * For generic crypt(3), the algorithm is returned as the first "tunable cost":
 * 0: unknown (shouldn't happen)
 * 1: descrypt
 * 2: md5crypt
 * 3: sunmd5
 * 4: bcrypt
 * 5: sha256crypt
 * 6: sha512crypt
 * New subformats should be added to the end of the list.
 * Otherwise, restored sessions might continue cracking different hashes
 * if the (not yet implemented) option --cost= had been used
 * when starting that session.
 */
static unsigned int c3_subformat_algorithm(void *salt)
{
	char *c3_salt;

	c3_salt = salt;

	if (!c3_salt[0] || !c3_salt[1] )
		return 0;
	if (!c3_salt[2])
		return 1;
	if (c3_salt[0] != '$')
		return 0;
	if (c3_salt[1] == '1')
		return 2;
	if (c3_salt[1] == 'm')
		return 3;
	if (c3_salt[1] == '2' && c3_salt[2] == 'a')
		return 4;
	if (c3_salt[1] == '5')
		return 5;
	if (c3_salt[1] == '6')
		return 6;
	return 0;
}

/* Second tunable cost: algorithm-specific iteration count parsed from
   the salt (or the algorithm's fixed/default count). */
static unsigned int c3_algorithm_specific_cost1(void *salt)
{
	unsigned int algorithm, rounds;
	char *c3_salt;

	c3_salt = salt;
	algorithm = c3_subformat_algorithm(salt);

	if(algorithm < 3) /* no tunable cost parameters */
		return 1;

	/* NOTE(review): "case 1" and "case 2" below are unreachable — the
	   algorithm < 3 check above already returned for them. */
	/* NOTE(review): sscanf uses %d with an unsigned int destination;
	   %u would be the matching conversion — confirm and fix upstream. */
	switch (algorithm) {
		case 1: // DES
			return 25;
		case 2: // cryptmd5
			return 1000;
		case 3: // sun_md5
			c3_salt = strstr(c3_salt, "rounds=");
			if (!c3_salt) {
				return 904+4096; // default
			}
			sscanf(c3_salt, "rounds=%d", &rounds);
			return rounds+4096;
		case 4: // bf
			c3_salt += 4;
			sscanf(c3_salt, "%d", &rounds);
			return rounds;
		case 5:
		case 6:
			// sha256crypt and sha512crypt handled the same: $x$rounds=xxxx$salt$hash (or $x$salt$hash for 5000 round default);
			c3_salt += 3;
			if (strncmp(c3_salt, "rounds=", 7))
				return 5000; // default
			sscanf(c3_salt, "rounds=%d", &rounds);
			return rounds;
	}
	return 1;
}

/* Format registration: wires the callbacks above into John's format API. */
struct fmt_main fmt_crypt = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/*
			 * use algorithm as first tunable cost:
			 * (0: unknown)
			 * descrypt, md5crypt, sunmd5, bcrypt, sha256crypt, sha512crypt
			 */
			"algorithm [1:descrypt 2:md5crypt 3:sunmd5 4:bcrypt 5:sha256crypt 6:sha512crypt]",
			"algorithm specific iterations",
		},
		{ NULL },
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		binary,
		salt,
		{
			c3_subformat_algorithm,
#if 1
			c3_algorithm_specific_cost1
#endif
		},
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			NULL,
			NULL
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif // HAVE_CRYPT
fusion_nb.c
#include <stdlib.h> #include "fusion_nb.h" void fusion_nb(float* l,int m,int n,float*output){ #pragma omp parallel for for (int H4 = 1; H4 < (n - (1 + 0)); H4++) { for (int H5 = 1; H5 < (m - (1 + 0)); H5++) { float tmp2 = 0; float tmp3 = 0; float tmp4 = 0; float tmp5 = 0; tmp5 = l[(((m)) * (H4 - (1))) + H5 - (1)]; float tmp6 = 0; tmp6 = l[(((m)) * (H4 - (1))) + H5]; tmp4 = tmp5 + tmp6; float tmp7 = 0; tmp7 = l[(((m)) * (H4 - (1))) + H5 + 1]; tmp3 = tmp4 + tmp7; float tmp8 = 0; float tmp9 = 0; float tmp10 = 0; tmp10 = l[(((m)) * (H4)) + H5 - (1)]; float tmp11 = 0; tmp11 = l[(((m)) * (H4)) + H5]; tmp9 = tmp10 + tmp11; float tmp12 = 0; tmp12 = l[(((m)) * (H4)) + H5 + 1]; tmp8 = tmp9 + tmp12; tmp2 = tmp3 + tmp8; float tmp13 = 0; float tmp14 = 0; float tmp15 = 0; tmp15 = l[(((m)) * (H4 + 1)) + H5 - (1)]; float tmp16 = 0; tmp16 = l[(((m)) * (H4 + 1)) + H5]; tmp14 = tmp15 + tmp16; float tmp17 = 0; tmp17 = l[(((m)) * (H4 + 1)) + H5 + 1]; tmp13 = tmp14 + tmp17; output[(((m - (1 + 0)) - 1)) * ((H4 - (1))) + (H5 - (1))] = tmp2 + tmp13; } } }
sections.c
#include <stdio.h>
#include <omp.h>

/* Each of these runs as one OpenMP section and prints which thread
   of the team executed it. */
void funcA(void)
{
	printf("En funcA: esta sección la ejecuta el thread %d\n",
	       omp_get_thread_num());
}

void funcB(void)
{
	printf("En funcB: esta sección la ejecuta el thread %d\n",
	       omp_get_thread_num());
}

/*
 * Demonstrates the OpenMP "sections" worksharing construct: within one
 * parallel region, funcA and funcB may be executed by different threads.
 *
 * Fix: the original declared `void main()`, which is not a valid signature
 * for main in hosted C (C11 5.1.2.2.1); use `int main(void)` and return 0.
 */
int main(void)
{
	#pragma omp parallel
	{
		#pragma omp sections
		{
			#pragma omp section
			funcA();

			#pragma omp section
			funcB();
		}
	}
	return 0;
}
kmp_set_defaults_lock_bug.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" /* The bug occurs if the lock table is reallocated after kmp_set_defaults() is called. If the table is reallocated, then the lock will not point to a valid lock object after the kmp_set_defaults() call.*/ omp_lock_t lock; int test_kmp_set_defaults_lock_bug() { /* checks that omp_get_num_threads is equal to the number of threads */ int nthreads_lib; int nthreads = 0; nthreads_lib = -1; #pragma omp parallel { omp_set_lock(&lock); nthreads++; omp_unset_lock(&lock); #pragma omp single { nthreads_lib = omp_get_num_threads (); } /* end of single */ } /* end of parallel */ kmp_set_defaults("OMP_NUM_THREADS"); #pragma omp parallel { omp_set_lock(&lock); nthreads++; omp_unset_lock(&lock); } /* end of parallel */ return (nthreads == 2*nthreads_lib); } int main() { int i; int num_failed=0; omp_init_lock(&lock); for(i = 0; i < REPETITIONS; i++) { if(!test_kmp_set_defaults_lock_bug()) { num_failed++; } } omp_destroy_lock(&lock); return num_failed; }
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << 
"This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case 
mshadow::kBool: \ { \ typedef bool DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! 
* \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
*/ template<int ndim> MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret += coord[i] * stride[i]; } return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? 
cumprod : 0; cumprod *= shape[i]; } return stride; } /* Increment coordinates */ template<int ndim> MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) { ++(*coord)[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; } return (*coord)[0] < shape[0]; } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx, const Shape<ndim>& stride) { ++(*coord)[ndim-1]; *idx += stride[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx = *idx + stride[i-1] - shape[i] * stride[i]; } } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx1, const Shape<ndim>& stride1, index_t* idx2, const Shape<ndim>& stride2) { ++(*coord)[ndim-1]; *idx1 += stride1[ndim-1]; *idx2 += stride2[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i]; *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i]; } } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! 
\brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! 
\brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } /*! \brief input is a tensor and the output is a boolean tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and two scalar value with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } #ifndef _WIN32 /*! 
\brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! \brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } #endif /*! 
\brief inputs are two tensors with a float output tensor */
  // Integral-input overload: OP::Map consumes two DType elements, result is float.
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a scalar value with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

// Primary template; specialized below for cpu and (under __CUDACC__) gpu.
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Serial fallback when OMP would not help (single recommended thread).
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // NOTE(review): the parallel loop iterates index_t while N is size_t —
      // assumes N fits in index_t; confirm for very large N.
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    // false: do not reserve a core for the GPU scheduler in this query.
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // Dynamic schedule: iterations are handed out on demand, which balances
      // load when per-iteration cost varies (e.g. sparse rows).
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // Use the measured tuning data for this primitive op to decide whether the
    // OMP fork/join overhead is worth it for N iterations.
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   *        operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Single partition covering the whole range.
      OP::Map(0, N, args...);
    } else {
      // Each thread receives one contiguous [i, i+length) slice; the last
      // slice is clipped to N.  OP::Map here takes (start, count, args...).
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    // T itself carries the tuning data (derives from `tunable`).
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    // T is a wrapper (e.g. op_with_req); tune on the wrapped T::Operation.
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
// Grid-stride loop: every thread strides by the total thread count so any
// grid size covers all N iterations.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N;
       i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < N;
       i += blockDim.x * gridDim.x) {
    // Ex variant: OP::Map takes (start, count, args...); count is 1 per thread.
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_MXNET_OP_H_
matmul_offload.c
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define SEED 1234
#define min(x,y) (((x) < (y)) ? (x) : (y))

/* Working precision: compile with -DDOUBLE for double; default is float. */
#if defined DOUBLE
typedef double PREC;
#else
typedef float PREC;
#endif

PREC random_number();
void random_matrix(PREC*, int, int);
void zero_matrix(PREC*, int, int) ;
void print_matrix(PREC*, int , int, const char*);
void mat_mul(PREC*, PREC*, PREC*, int, int, int);

/*
 * Driver: times C(m x n) = A(m x p) * B(p x n) on an OpenMP target device
 * and reports wall time and GFlop/s.
 * Usage: ./matmul_offload [m n p]   (all three or none; default 4 4 4)
 */
int main(int argc, char **argv)
{
    int m, n, p;
    double elapsed_time, gflops;
    struct timeval t1, t2;

    // matrix size
    if (argc != 4) {
        m = 4; n = 4; p = 4;
    }
    else {
        m = atoi(argv[1]);
        n = atoi(argv[2]);
        p = atoi(argv[3]);
    }

    // allocation -- checked: a failed malloc would otherwise fault inside
    // the offloaded kernel with a much less helpful diagnostic
    PREC* A = malloc(sizeof(PREC) * m * p);
    PREC* B = malloc(sizeof(PREC) * p * n);
    PREC* C = malloc(sizeof(PREC) * m * n);
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "Error: out of memory\n");
        free(A); free(B); free(C);
        return EXIT_FAILURE;
    }

    // initialize A, B with reproducible pseudo-random values
    srand(SEED);
    random_matrix(A, m, p);
    random_matrix(B, p, n);

    // initialize C
    zero_matrix(C, m, n);

    // start timing
    gettimeofday(&t1, NULL);

    // C = A * B
    mat_mul(A, B, C, m, n, p);

    // end timing
    gettimeofday(&t2, NULL);

    // walltime
    elapsed_time = (t2.tv_usec - t1.tv_usec)*1e-6 + (t2.tv_sec - t1.tv_sec);
    printf("Timing: %10.3f (s)\n", elapsed_time);

    // gflops: each C(i,j) costs p multiplies and p-1 adds, so the total is
    // m*n*(2p-1) = 2*m*n*p - m*n flops (the old formula subtracted m*p,
    // which is only correct when n == p)
    gflops = (2.0*m*n*p - 1.0*m*n)*1E-9;
    printf("Performance: %10.3f (GFlops)\n", gflops/elapsed_time);

    // debug
    print_matrix(A, m, p, "A =");
    print_matrix(B, p, n, "B =");
    print_matrix(C, m, n, "C =");

    // deallocate
    free(A);
    free(B);
    free(C);
    return 0;
}

/* Fill an m x n row-major matrix with values in [0,1). */
void random_matrix(PREC *matrix, int m, int n)
{
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
            matrix[i*n + j] = random_number();
}

/* Zero an m x n row-major matrix. */
void zero_matrix(PREC *matrix, int m, int n )
{
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
            matrix[i*n + j] = 0.0;
}

/*
 * C += A * B, offloaded with OpenMP target.
 * A is m x p, B is p x n, C is m x n, all row-major.
 *
 * FIX: the previous version collapsed the (i,k) loops, so different k
 * iterations for the same (i,j) updated C[i*n+j] concurrently -- a data
 * race.  Collapsing (i,j) instead gives each thread exclusive ownership of
 * its C elements; the k reduction runs sequentially in a local accumulator.
 */
void mat_mul(PREC* A, PREC* B, PREC* C, int m, int n, int p)
{
#pragma omp target teams distribute parallel for \
        map(to: A[0:m*p], B[0:p*n]) map(tofrom: C[0:m*n]) collapse(2)
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            PREC sum = C[i*n+j];
            for (int k = 0; k < p; k++)
                sum += A[i*p+k] * B[k*n+j];
            C[i*n+j] = sum;
        }
    }
}

/* Print the top-left min(m,4) x min(n,4) corner of a matrix. */
void print_matrix(PREC *matrix, int m , int n, const char *name )
{
    printf("%s\n", name);
    for (int i=0; i<min(m,4); i++) {
        for (int j=0; j<min(n,4); j++) {
            printf ("%12.5f", matrix[i*n+j]);
        }
        printf ("\n");
    }
}

/* Uniform pseudo-random value in [0,1]. */
PREC random_number()
{
    return ((PREC)rand() / (PREC)RAND_MAX);
}
spotrs.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpotrs.c, normal z -> s, Fri Sep 28 17:38:02 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_potrs * * Solves a system of linear equations A * X = B with a symmetric positive * definite in the complex matrix A using the Cholesky factorization * A = U^T*U or A = L*L^T computed by plasma_spotrf. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The order of the matrix A. n >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of * columns of the matrix B. nrhs >= 0. * * @param[in,out] pA * The triangular factor U or L from the Cholesky * factorization A = U^T*U or A = L*L^T, computed by * plasma_spotrf. * Remark: If out-of-place layout translation is used, the * matrix A can be considered as input, however if inplace * layout translation is enabled, the content of A will be * reordered for computation and restored before exiting the * function. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,n). * * @param[in,out] pB * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,n). 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************* * * @sa plasma_omp_spotrs * @sa plasma_cpotrs * @sa plasma_dpotrs * @sa plasma_spotrs * @sa plasma_spotrf * ******************************************************************************/ int plasma_spotrs(plasma_enum_t uplo, int n, int nrhs, float *pA, int lda, float *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -3; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -5; } if (ldb < imax(1, n)) { plasma_error("illegal value of ldb"); return -7; } // quick return if (imax(n, nrhs) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_trsm(plasma, PlasmaRealFloat, n, n); // Set tiling parameters. int nb = plasma->nb; // Initialize tile matrix descriptors. plasma_desc_t A; plasma_desc_t B; int retval; retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb, n, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. 
plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_sge2desc(pA, lda, A, &sequence, &request); plasma_omp_sge2desc(pB, ldb, B, &sequence, &request); // Call the tile async function. plasma_omp_spotrs(uplo, A, B, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request); } // implicit synchronization // Free matrix A in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_potrs * * Solves a system of linear equations using previously * computed Cholesky factorization. * Non-blocking tile version of plasma_spotrs(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] A * The triangular factor U or L from the Cholesky factorization * A = U^T*U or A = L*L^T, computed by plasma_spotrf. * * @param[in,out] B * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. 
The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_spotrs
 * @sa plasma_omp_spotrs
 * @sa plasma_omp_cpotrs
 * @sa plasma_omp_dpotrs
 * @sa plasma_omp_spotrs
 * @sa plasma_omp_spotrf
 *
 ******************************************************************************/
void plasma_omp_spotrs(plasma_enum_t uplo,
                       plasma_desc_t A,
                       plasma_desc_t B,
                       plasma_sequence_t *sequence,
                       plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // NOTE(review): when sequence itself is NULL, plasma_request_fail is
    // still handed that NULL pointer -- presumably it tolerates this;
    // verify against plasma_request_fail's implementation.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions.
    // Two triangular solves: first with the (conjugate-)transposed factor,
    // then with the factor itself (forward then backward substitution).
    // This file was generated from zpotrs (z -> s); for real matrices
    // PlasmaConjTrans is presumably equivalent to a plain transpose --
    // confirm against the plasma_pstrsm dispatch.
    plasma_pstrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);

    plasma_pstrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);
}
bugz-50967-c.c
#include <stdio.h> #include <omp.h> // This smoke test has shows two problems. // The first is compile fail when the subtype has a smaller size than 4 bytes // That is both char and short fail. We need to generate compile fail // for amdgcn when atomic type is char or short OR use a temp 4 byte // value, which may not be atomic. // // The 2nd problem is a runtime fail. num_threads(64) has no control over // actual number of threads when the default thread limit is 256. // Set SUBTYPE to anything equal or greater size than int // for atomic update not to cause fail in llc. #define SUBTYPE char #define REALTYPE int int f() { REALTYPE b = 0; #pragma omp target map(tofrom: b) { #pragma omp teams distribute // thread_limit(64) // add clause thread_limit(64) above to circumvent the problem // not getting num_threads 64 in parallel below. // Without thread_limit clause, this incorrectly reports 256 for(int i = 0; i < 1; ++i) { SUBTYPE a = 0; #pragma omp parallel num_threads(64) { #pragma omp atomic update a += 1; } b = (REALTYPE) (a); } } if (b == 64 ) return 0; printf("ERROR: expecting 64 got %d\n",b); return 1; } int main() { return f(); }
GB_unaryop__ainv_int64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int64_uint32
// op(A') function:  GB_tran__ainv_int64_uint32

// C type:   int64_t
// A type:   uint32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
// Note: the uint32 value is widened to int64 *before* negation, so the
// result is the exact arithmetic negation of the input (no unsigned wrap).
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over anz entries, statically scheduled across nthreads.
GrB_Info GB_unop__ainv_int64_uint32
(
    int64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop lives in GB_unaryop_transpose.c, driven by the macros above.
GrB_Info GB_tran__ainv_int64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nnint.c
#include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>

/* Element accessors for (possibly strided) 1-D / 2-D numpy arrays. */
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
#define IND_int(a,i) *((int *)(a->data+i*a->strides[0]))
#define IND2(a,i,j) *((double *)(a->data+i*a->strides[0]+j*a->strides[1]))
#define IND2_int(a,i,j) *((int *)(a->data+i*a->strides[0]+j*a->strides[1]))

static PyObject *nnint(PyObject *self, PyObject *args, PyObject *keywds);

/*
 * Nearest-neighbor intra-pixel interpolation.
 *
 * Computes a per-bin mean flux (optionally also a per-bin standard
 * deviation), normalizes the bins by the mean of all valid bins, and maps
 * each measurement to its bin's normalized value.
 *
 * Returns, depending on the retbinflux/retbinstd keywords:
 *   output | (output, binflux) | (output, binstd) | (output, binflux, binstd)
 */
static PyObject *nnint(PyObject *self, PyObject *args, PyObject *keywds)
{
  PyObject *posflux, *retbinflux, *retbinstd, *issmoothing;
  PyObject *wbfipmask;
  PyArrayObject *x, *y, *flux, *binfluxmask, *kernel, *binloc, *dydx, *etc, *ipparams;
  PyObject *tup1, *tup2;

  // keyword defaults
  retbinflux = Py_False;
  retbinstd  = Py_False;

  static char *kwlist[] = {"ipparams","posflux","etc","retbinflux","retbinstd",NULL};

  if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|OOO",kwlist,&ipparams,&posflux\
                                  ,&etc,&retbinflux,&retbinstd))
    {
      return NULL;
    }

  // unpack the posflux list
  y           = (PyArrayObject *) PyList_GetItem(posflux,0);
  x           = (PyArrayObject *) PyList_GetItem(posflux,1);
  flux        = (PyArrayObject *) PyList_GetItem(posflux,2);
  wbfipmask   =                   PyList_GetItem(posflux,3);
  binfluxmask = (PyArrayObject *) PyList_GetItem(posflux,4);
  kernel      = (PyArrayObject *) PyList_GetItem(posflux,5);
  tup1        =                   PyList_GetItem(posflux,6);
  binloc      = (PyArrayObject *) PyList_GetItem(posflux,7);
  dydx        = (PyArrayObject *) PyList_GetItem(posflux,8);
  tup2        =                   PyList_GetItem(posflux,9);
  issmoothing =                   PyList_GetItem(posflux,10);

  // output arrays
  PyArrayObject *output, *binflux, *binstd, *tempwbfip;
  npy_intp dims[1];
  dims[0] = flux->dimensions[0];
  output  = (PyArrayObject *) PyArray_SimpleNew(1,dims,NPY_DOUBLE);
  dims[0] = PyList_Size(wbfipmask);
  binflux = (PyArrayObject *) PyArray_SimpleNew(1,dims,NPY_DOUBLE);
  binstd  = (PyArrayObject *) PyArray_SimpleNew(1,dims,NPY_DOUBLE);

  int dis = binfluxmask->dimensions[0];
  int i,j,arsize,temp_int,counter;
  double temp_mean,temp_std,meanbinflux;

  // lock serializing updates to the meanbinflux/counter accumulators
  omp_lock_t lck;
  omp_init_lock(&lck);
  counter     = 0;
  meanbinflux = 0;

  // NOTE(review): this loop calls CPython API (PyList_GetItem,
  // PyObject_IsTrue) from multiple threads without holding the GIL per
  // thread -- presumably safe here because the lists are not mutated
  // concurrently, but verify against the CPython C-API threading rules.
  #pragma omp parallel for private(j,tempwbfip,arsize,temp_mean,temp_std,temp_int)
  for(i = 0; i<dis;i++)
    {
      if(IND_int(binfluxmask,i) == 1)
        {
          if(PyObject_IsTrue(retbinstd) == 1)
            {
              tempwbfip = (PyArrayObject *) PyList_GetItem(wbfipmask,i);
              arsize    = tempwbfip->dimensions[0];
              temp_mean = 0;
              temp_std  = 0;
              // mean of flux/etc over the bin's members
              for(j=0;j<arsize;j++)
                {
                  temp_int   = IND_int(tempwbfip,j);
                  temp_mean += (IND(flux,temp_int)/IND(etc,temp_int));
                }
              temp_mean /= (double) arsize;
              // population standard deviation about that mean
              for(j=0;j<arsize;j++)
                {
                  temp_int  = IND_int(tempwbfip,j);
                  temp_std += pow(((IND(flux,temp_int)/IND(etc,temp_int))\
                                   -temp_mean),2);
                }
              temp_std /= (double) arsize;
              temp_std  = sqrt(temp_std);
              IND(binflux,i) = temp_mean;
              IND(binstd,i)  = temp_std;
              omp_set_lock(&lck);
              meanbinflux += temp_mean;
              counter     += 1;
              omp_unset_lock(&lck);
            }
          else
            {
              tempwbfip = (PyArrayObject *) PyList_GetItem(wbfipmask,i);
              arsize    = tempwbfip->dimensions[0];
              temp_mean = 0;
              for(j=0;j<arsize;j++)
                {
                  temp_int   = IND_int(tempwbfip,j);
                  temp_mean += (IND(flux,temp_int)/IND(etc,temp_int));
                }
              temp_mean /= (double) arsize;
              IND(binflux,i) = temp_mean;
              // FIX: binstd was left uninitialized on this path but is
              // still normalized (and possibly returned) below.
              IND(binstd,i)  = 0;
              omp_set_lock(&lck);
              meanbinflux += temp_mean;
              counter     += 1;
              omp_unset_lock(&lck);
            }
        }
      else
        {
          IND(binflux,i) = 0;
          IND(binstd, i) = 0;
        }
    }
  omp_destroy_lock(&lck);   // FIX: the lock was never destroyed

  // FIX: guard against division by zero when no bin is valid
  if (counter > 0)
    meanbinflux /= (double) counter;
  else
    meanbinflux = 1.0;   // leaves the (all-zero) bins unscaled

  // normalize bins by the mean of the valid bins
  #pragma omp parallel for
  for(i=0;i<dims[0];i++)
    {
      IND(binflux,i) /= meanbinflux;
      IND(binstd, i) /= meanbinflux;
    }

  // map each measurement to its bin's normalized flux
  dims[0] = flux->dimensions[0];
  // FIX: temp_int was shared across threads here (a data race); each
  // iteration now uses a private copy.
  #pragma omp parallel for private(temp_int)
  for(i=0;i<dims[0];i++)
    {
      temp_int = IND2_int(binloc,0,i);
      IND(output,i) = IND(binflux,temp_int);
    }

  if(PyObject_IsTrue(retbinflux) == 0 && PyObject_IsTrue(retbinstd) == 0)
    {
      Py_XDECREF(binflux);
      Py_XDECREF(binstd);
      return PyArray_Return(output);
    }
  else if (PyObject_IsTrue(retbinflux) == 1 && PyObject_IsTrue(retbinstd)==1)
    {
      return Py_BuildValue("NNN",output,binflux,binstd);
    }
  else if (PyObject_IsTrue(retbinflux) == 1)
    {
      Py_XDECREF(binstd);
      return Py_BuildValue("NN",output,binflux);
    }
  else
    {
      Py_XDECREF(binflux);
      return Py_BuildValue("NN",output,binstd);
    }
}

static char module_docstring[]="\
   This function fits the intra-pixel sensitivity effect using the mean \n\
   within a given binned position (nearest-neighbor interpolation).\n\
\n\
   Parameters\n\
   ----------\n\
    ipparams :  tuple\n\
                unused\n\
    y :         1D array, size = # of measurements\n\
                Pixel position along y\n\
    x :         1D array, size = # of measurements\n\
                Pixel position along x\n\
    flux :      1D array, size = # of measurements\n\
                Observed flux at each position\n\
    wherebinflux :  1D array, size = # of bins\n\
                    Measurement number assigned to each bin\n\
    gridpt :    1D array, size = # of measurements \n\
\n\
    Returns\n\
    -------\n\
    1D array, size = # of measurements\n\
    Normalized intrapixel-corrected flux multiplier \n\
\n\
    Revisions\n\
    ---------\n\
    2010-06-07  Kevin Stevenson, UCF  \n\
                kevin218@knights.ucf.edu\n\
                Original version\n\
    2010-07-07  Kevin\n\
                Added wbfipmask\n\
    2011-01-06  nate lust, ucf\n\
                natelust at linux dot com\n\
                converted to c extension function\n\
    2018-11-27  Jonathan Fraine, SSI\n\
                jfraine at spacescience.org\n\
                Updated c extensions to python3, with support for python2.7\n\
";

static PyMethodDef module_methods[] = {
  {"nnint",(PyCFunction)nnint,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};

PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_nnint(void)
#else
initnnint(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
  PyObject *module;
  static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "nnint",             /* m_name */
    module_docstring,    /* m_doc */
    -1,                  /* m_size */
    module_methods,      /* m_methods */
    NULL,                /* m_reload */
    NULL,                /* m_traverse */
    NULL,                /* m_clear */
    NULL,                /* m_free */
  };
#endif

#if PY_MAJOR_VERSION >= 3
  module = PyModule_Create(&moduledef);
  if (!module)
    return NULL;
  /* Load `numpy` functionality. */
  import_array();
  return module;
#else
  PyObject *m = Py_InitModule3("nnint", module_methods, module_docstring);
  if (m == NULL)
    return;
  /* Load `numpy` functionality. */
  import_array();
#endif
}
GB_binop__lt_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__lt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__lt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__lt_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lt_int8)
// A*D function (colscale):         GB (_AxD__lt_int8)
// D*A function (rowscale):         GB (_DxB__lt_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__lt_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__lt_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lt_int8)
// C=scalar+B                       GB (_bind1st__lt_int8)
// C=scalar+B'                      GB (_bind1st_tran__lt_int8)
// C=A+scalar                       GB (_bind2nd__lt_int8)
// C=A'+scalar                      GB (_bind2nd_tran__lt_int8)

// C type:     bool
// A type:     int8_t
// A pattern?  0
// B type:     int8_t
// B pattern?  0

// BinaryOp:   cij = (aij < bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_INT8 || GxB_NO_LT_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LT is none of these, so this variant is compiled out for this operator.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Body compiled out (#if 0): accumulation needs a monoid, which LT is not.
GrB_Info GB (_Cdense_accumB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Body compiled out (#if 0) for the same reason as above.
GrB_Info GB (_Cdense_accumb__lt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    { 
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    { 
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lt_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lt_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p <
bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* communication.h */
/*! @brief Flag for checking if this header has already been included. */ #ifndef YGGCOMMUNICATION_H_ #define YGGCOMMUNICATION_H_ #include "../tools.h" #include "../datatypes/datatypes.h" #include "CommBase.h" #include "IPCComm.h" #include "ZMQComm.h" #include "ServerComm.h" #include "ClientComm.h" #include "AsciiFileComm.h" #include "AsciiTableComm.h" #include "DefaultComm.h" #ifdef __cplusplus /* If this is a C++ compiler, use C linkage */ extern "C" { #endif /*! @brief Memory to keep track of comms to clean up at exit. */ static void **vcomms2clean = NULL; static size_t ncomms2clean = 0; static size_t clean_registered = 0; static size_t clean_in_progress = 0; static size_t clean_called = 0; #ifdef _OPENMP #pragma omp threadprivate(clean_in_progress) #endif /*! @brief Memory to keep track of global scope comms. */ #ifdef _OPENMP static size_t global_scope_comm = 1; #define WITH_GLOBAL_SCOPE(COMM) global_scope_comm = 1; COMM #pragma omp threadprivate(global_scope_comm) #else static size_t global_scope_comm = 0; #define WITH_GLOBAL_SCOPE(COMM) global_scope_comm = 1; COMM; global_scope_comm = 0 #endif /*! @brief Check if EOF should be sent for a comm being used on multiple threads. @param[in] x const comm_t* Comm to check. @returns int 1 if EOF has been sent for all but this comm and 0 otherwise. */ static int check_threaded_eof(const comm_t* x) { int out = 1; #ifdef _OPENMP #pragma omp critical (comms) { size_t i; comm_t* icomm = NULL; int nthreads = 1; for (i = 0; i < ncomms2clean; i++) { if ((out == 1) && (vcomms2clean[i] != NULL)) { icomm = (comm_t*)(vcomms2clean[i]); if ((strcmp(icomm->name, x->name) == 0) && (icomm->thread_id != x->thread_id)) { nthreads++; #pragma omp critical (sent_eof) { if ((x->const_flags != NULL) && (!(x->const_flags[0] & COMM_EOF_SENT))) out = 0; } } } } if (nthreads < omp_get_num_threads()) out = 0; // all threads havn't initialized a comm } #endif return out; }; /*! @brief Set the sent_eof flag on the comm. 
@param[in] x comm_t* Comm to set the flag for. */ static void set_sent_eof(const comm_t* x) { #ifdef _OPENMP #pragma omp critical (sent_eof) { #endif x->const_flags[0] = x->const_flags[0] | COMM_EOF_SENT; if (x->type == CLIENT_COMM) { comm_t *req_comm = (comm_t*)(x->handle); // Don't recurse to prevent block w/ omp critical recursion req_comm->const_flags[0] = req_comm->const_flags[0] | COMM_EOF_SENT; } #ifdef _OPENMP } #endif }; /*! @brief Retrieve a registered global comm if it exists. @param[in] name const char* name Name that comm might be registered under. @returns comm_t* Pointer to registered comm. NULL if one does not exist with the specified name. */ static comm_t* get_global_scope_comm(const char *name) { comm_t* out = NULL; #ifdef _OPENMP #pragma omp critical (comms) { #endif if (global_scope_comm) { size_t i; comm_t* icomm = NULL; int current_thread = get_thread_id(); for (i = 0; i < ncomms2clean; i++) { if (vcomms2clean[i] != NULL) { icomm = (comm_t*)(vcomms2clean[i]); if ((strcmp(icomm->name, name) == 0) && (icomm->thread_id == current_thread)) { out = icomm; break; } else { const char* YGG_MODEL_NAME = getenv("YGG_MODEL_NAME"); char alt_name[100]; sprintf(alt_name, "%s:%s", YGG_MODEL_NAME, name); if ((strcmp(icomm->name, alt_name) == 0) && (icomm->thread_id == current_thread)) { out = icomm; break; } } } } } #ifdef _OPENMP } #endif return out; }; // Forward declaration of eof static int comm_send_eof(const comm_t *x); static int comm_nmsg(const comm_t *x); /*! @brief Determine if a channel has a format type associated with it. @param[in] x comm_t * Pointer to communicator to check. @returns int 1 if format type, 0 otherwise. */ static int is_comm_format_array_type(const comm_t *x) { dtype_t *datatype = x->datatype; return is_dtype_format_array(datatype); }; /*! @brief Determine if the current thread can use a comm registered by another. @param[in] thread_id int Thread that created the comm. 
@returns int 1 if the current thread can use the comm, 0 otherwise. */ static int thread_can_use(int thread_id) { int current_thread_id = get_thread_id(); if ((clean_in_progress) && (current_thread_id == 0)) return 1; if (thread_id == current_thread_id) return 1; return 0; }; /*! @brief Perform deallocation for type specific communicator. @param[in] x comm_t * Pointer to communicator to deallocate. @returns int 1 if there is an error, 0 otherwise. */ static int free_comm_type(comm_t *x) { comm_type t = x->type; int ret = 1; if (!(thread_can_use(x->thread_id))) { ygglog_error("free_comm_type: Thread is attempting to use a comm it did not initialize"); return ret; } if (t == IPC_COMM) ret = free_ipc_comm(x); else if (t == ZMQ_COMM) ret = free_zmq_comm(x); else if (t == SERVER_COMM) ret = free_server_comm(x); else if (t == CLIENT_COMM) ret = free_client_comm(x); else if (t == ASCII_FILE_COMM) ret = free_ascii_file_comm(x); else if ((t == ASCII_TABLE_COMM) || (t == ASCII_TABLE_ARRAY_COMM)) ret = free_ascii_table_comm(x); else { ygglog_error("free_comm_type: Unsupported comm_type %d", t); } return ret; }; /*! @brief Perform deallocation for generic communicator. @param[in] x comm_t * Pointer to communicator to deallocate. @returns int 1 if there is an error, 0 otherwise. 
 */
static int free_comm(comm_t *x) {
  int ret = 0;
  if (x == NULL)
    return ret;
  ygglog_debug("free_comm(%s)", x->name);
  // Send EOF for output comms and then wait for messages to be recv'd
  if ((is_send(x->direction)) && (x->flags & COMM_FLAG_VALID)) {
    if (_ygg_error_flag == 0) {
      ygglog_debug("free_comm(%s): Sending EOF", x->name);
      comm_send_eof(x);
      // Block until the consumer has drained the queue.
      while (comm_nmsg(x) > 0) {
        ygglog_debug("free_comm(%s): draining %d messages",
                     x->name, comm_nmsg(x));
        usleep(YGG_SLEEP_TIME);
      }
    } else {
      // An error was registered elsewhere; skip the EOF handshake.
      ygglog_error("free_comm(%s): Error registered", x->name);
    }
  }
#ifdef _OPENMP
#pragma omp critical (comms)
  {
#endif
  ret = free_comm_type(x);
  // Save the registry index before free_comm_base tears down x's contents;
  // vcomms2clean[idx] is the pointer to x itself (stored by register_comm),
  // so the final free() below releases the comm structure.
  int idx = x->index_in_register;
  free_comm_base(x);
  if (idx >= 0) {
    if (vcomms2clean[idx] != NULL) {
      free(vcomms2clean[idx]);
      vcomms2clean[idx] = NULL;
    }
  }
  ygglog_debug("free_comm: Finished");
#ifdef _OPENMP
  }
#endif
  return ret;
};

/*!
  @brief Free comms created that were not freed.
  Registered via atexit() by ygg_init; frees every comm still in the
  registry, releases the registry itself, and shuts down ZMQ/Python state.
*/
static void clean_comms(void) {
#ifdef _OPENMP
#pragma omp critical (clean)
  {
#endif
  size_t i;
  if (!(clean_called)) {
    clean_in_progress = 1;
    ygglog_debug("atexit begin");
    if (vcomms2clean != NULL) {
      for (i = 0; i < ncomms2clean; i++) {
        if (vcomms2clean[i] != NULL) {
          free_comm((comm_t*)(vcomms2clean[i]));
        }
      }
    }
#ifdef _OPENMP
#pragma omp critical (comms)
    {
#endif
    if (vcomms2clean != NULL) {
      free(vcomms2clean);
      vcomms2clean = NULL;
    }
    ncomms2clean = 0;
    ygglog_debug("atexit finished cleaning comms, in final shutdown");
#if defined(ZMQINSTALLED)
    // #if defined(_MSC_VER) && defined(ZMQINSTALLED)
    ygg_zsys_shutdown();
#endif
    if (Py_IsInitialized()) {
      Py_Finalize();
    }
    /* printf(""); */
    clean_called = 1;
#ifdef _OPENMP
    }
#endif
  }
#ifdef _OPENMP
  }
#endif
  ygglog_debug("atexit done");
  // Propagate any registered error code as the process exit status.
  if (_ygg_error_flag != 0) {
    _exit(_ygg_error_flag);
  }
};

/*!
@brief Initialize yggdrasil in a thread-safe way */ static inline int ygg_init() { int out = 0; #ifdef _OPENMP #pragma omp critical (init) { #endif ygglog_debug("ygg_init: clean_registered = %d", clean_registered); if (clean_registered == 0) { #if defined(ZMQINSTALLED) if (!(ygg_zsys_init())) { out = -1; } #endif if (out == 0) { ygglog_debug("ygg_init: Registering cleanup"); atexit(clean_comms); clean_registered = 1; } } #ifdef _OPENMP } #endif return out; }; /*! @brief Register a comm so that it can be cleaned up later if not done explicitly. @param[in] x comm_t* Address of communicator structure that should be registered. @returns int -1 if there is an error, 0 otherwise. */ static int register_comm(comm_t *x) { if (x == NULL) { return 0; } int error_flag = 0; #ifdef _OPENMP #pragma omp critical (comms) { #endif if (ygg_init()) { error_flag = 1; } else { void **t_vcomms2clean = (void**)realloc(vcomms2clean, sizeof(void*)*(ncomms2clean + 1)); if (t_vcomms2clean == NULL) { ygglog_error("register_comm(%s): Failed to realloc the comm list.", x->name); error_flag = -1; } else { vcomms2clean = t_vcomms2clean; x->index_in_register = (int)ncomms2clean; vcomms2clean[ncomms2clean++] = (void*)x; } } #ifdef _OPENMP } #endif return error_flag; }; /*! @brief Initialize a new communicator based on its type. @param[in] x comm_t * Pointer to communicator structure initialized with new_base_comm; @returns int -1 if the comm could not be initialized. 
*/ static int new_comm_type(comm_t *x) { comm_type t = x->type; int flag; if (t == IPC_COMM) flag = new_ipc_address(x); else if (t == ZMQ_COMM) flag = new_zmq_address(x); else if (t == SERVER_COMM) flag = new_server_address(x); else if (t == CLIENT_COMM) flag = new_client_address(x); else if (t == ASCII_FILE_COMM) flag = new_ascii_file_address(x); else if (t == ASCII_TABLE_COMM) flag = new_ascii_table_address(x); else if (t == ASCII_TABLE_ARRAY_COMM) flag = new_ascii_table_array_address(x); else { ygglog_error("new_comm_type: Unsupported comm_type %d", t); flag = -1; } return flag; }; /*! @brief Initialize the communicator based on its type. @param[in] x comm_t * Pointer to communicator structure initialized with init_base_comm; @returns int -1 if the comm could not be initialized. */ static int init_comm_type(comm_t *x) { comm_type t = x->type; int flag; if (t == IPC_COMM) flag = init_ipc_comm(x); else if (t == ZMQ_COMM) flag = init_zmq_comm(x); else if (t == SERVER_COMM) flag = init_server_comm(x); else if (t == CLIENT_COMM) flag = init_client_comm(x); else if (t == ASCII_FILE_COMM) flag = init_ascii_file_comm(x); else if (t == ASCII_TABLE_COMM) flag = init_ascii_table_comm(x); else if (t == ASCII_TABLE_ARRAY_COMM) flag = init_ascii_table_array_comm(x); else { ygglog_error("init_comm_type: Unsupported comm_type %d", t); flag = -1; } ygglog_debug("init_comm_type(%s): Done, flag = %d", x->name, flag); return flag; }; /*! @brief Initialize comm from the address. @param[in] address char * Address for new comm. If NULL, a new address is generated. @param[in] direction Direction that messages will go through the comm. Values include "recv" and "send". @param[in] t comm_type Type of comm that should be created. @param[in] datatype dtype_t* Pointer to data type structure. @returns comm_t* Pointer to comm structure. 
*/ static comm_t* new_comm(char *address, const char *direction, const comm_type t, dtype_t* datatype) { comm_t *ret = new_comm_base(address, direction, t, datatype); if (ret == NULL) { ygglog_error("new_comm: Could not initialize base."); return ret; } int flag; if (address == NULL) { flag = new_comm_type(ret); } else { flag = init_comm_type(ret); } if (flag < 0) { ygglog_error("new_comm: Failed to initialize new comm address."); ret->flags = ret->flags & ~COMM_FLAG_VALID; } else { if (strlen(ret->name) == 0) { sprintf(ret->name, "temp.%s", ret->address); } flag = register_comm(ret); if (flag < 0) { ygglog_error("new_comm: Failed to register new comm."); ret->flags = ret->flags & ~COMM_FLAG_VALID; } } return ret; }; /*! @brief Initialize a generic communicator. The name is used to locate the comm address stored in the associated environment variable. @param[in] name Name of environment variable that the queue address is stored in. @param[in] direction Direction that messages will go through the comm. Values include "recv" and "send". @param[in] t comm_type Type of comm that should be created. @param[in] datatype dtype_t* Pointer to data type structure. @returns comm_t* Comm structure. 
 */
static comm_t* init_comm(const char *name, const char *direction,
                         const comm_type t, dtype_t *datatype) {
  ygglog_debug("init_comm: Initializing comm.");
#ifdef _MSC_VER
  // Suppress Windows error dialogs so failures do not block the process.
  SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
  _set_abort_behavior(0,_WRITE_ABORT_MSG);
#endif
  // Reuse an existing global-scope comm with this name if one is registered;
  // the caller's datatype is no longer needed in that case.
  comm_t *ret = get_global_scope_comm(name);
  if (ret != NULL) {
    destroy_dtype(&datatype);
    return ret;
  }
  // Send comms default to a raw bytes datatype when none is provided.
  if ((datatype == NULL) && (strcmp(direction, "send") == 0)) {
    datatype = create_dtype_scalar("bytes", 0, "", false);
  }
  ret = init_comm_base(name, direction, t, datatype);
  if (ret == NULL) {
    ygglog_error("init_comm(%s): Could not initialize base.", name);
    return ret;
  }
  int flag = init_comm_type(ret);
  if (flag < 0) {
    ygglog_error("init_comm(%s): Could not initialize comm.", name);
    ret->flags = ret->flags & ~COMM_FLAG_VALID;
  } else {
    flag = register_comm(ret);
    if (flag < 0) {
      ygglog_error("init_comm(%s): Failed to register new comm.", name);
      ret->flags = ret->flags & ~COMM_FLAG_VALID;
    }
  }
  if (ret->flags & COMM_FLAG_VALID) {
    // Mark the comm global so later init_comm calls can find and reuse it.
    if (global_scope_comm) {
      ret->flags = ret->flags | COMM_FLAG_GLOBAL;
      ygglog_debug("init_comm(%s): Global comm!", name);
    }
    ygglog_debug("init_comm(%s): Initialized comm.", name);
  }
  return ret;
};

/*!
  @brief Convert a format string to a datatype.
  @param[in] format_str char* Format string.
  @param[in] as_array int If 1, inputs/outputs are processed as arrays.
  @returns dtype_t* Pointer to datatype structure.  NULL when format_str is
  NULL or creation fails.
*/
static dtype_t* formatstr2datatype(const char *format_str,
                                   const int as_array) {
  dtype_t* datatype = NULL;
  if (format_str != NULL) {
    datatype = create_dtype_format(format_str, as_array, false);
  }
  return datatype;
};

/*!
  @brief Initialize a generic communicator using a format string to determine
  the type.
  The name is used to locate the comm address stored in the associated
  environment variable.
  @param[in] name Name of environment variable that the queue address is
  stored in.
  @param[in] direction Direction that messages will go through the comm.
Values include "recv" and "send". @param[in] t comm_type Type of comm that should be created. @param[in] format_str char* Format string. @param[in] as_array int If 1, inputs/outputs are processed as arrays. @returns comm_t* Pointer to comm structure. */ static comm_t* init_comm_format(const char *name, const char *direction, const comm_type t, const char *format_str, const int as_array) { dtype_t* datatype = formatstr2datatype(format_str, as_array); comm_t* out = init_comm(name, direction, t, datatype); if ((format_str != NULL) && (datatype == NULL)) { ygglog_error("init_comm_format: Failed to create type from format_str."); if (out != NULL) { out->flags = out->flags & ~COMM_FLAG_VALID; } } return out; }; /*! @brief Get number of messages in the comm. @param[in] x comm_t Communicator to check. @returns int Number of messages. */ static int comm_nmsg(const comm_t *x) { int ret = -1; if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) { ygglog_error("comm_nmsg: Invalid comm"); return ret; } comm_type t = x->type; if (t == IPC_COMM) ret = ipc_comm_nmsg(x); else if (t == ZMQ_COMM) ret = zmq_comm_nmsg(x); else if (t == SERVER_COMM) ret = server_comm_nmsg(x); else if (t == CLIENT_COMM) ret = client_comm_nmsg(x); else if (t == ASCII_FILE_COMM) ret = ascii_file_comm_nmsg(x); else if ((t == ASCII_TABLE_COMM) || (t == ASCII_TABLE_ARRAY_COMM)) ret = ascii_table_comm_nmsg(x); else { ygglog_error("comm_nmsg: Unsupported comm_type %d", t); } return ret; }; /*! @brief Send a single message to the comm. Send a message smaller than YGG_MSG_MAX bytes to an output comm. If the message is larger, it will not be sent. @param[in] x comm_t* structure that comm should be sent to. @param[in] data character pointer to message that should be sent. @param[in] len size_t length of message to be sent. @returns int 0 if send succesfull, -1 if send unsuccessful. 
*/ static int comm_send_single(const comm_t *x, const char *data, const size_t len) { ygglog_debug("Sending %d bytes: '%s'\n", len, data); int ret = -1; if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) { ygglog_error("comm_send_single: Invalid comm"); return ret; } if (!(thread_can_use(x->thread_id))) { ygglog_error("comm_send_single: Thread is attempting to use a comm it did not initialize"); return ret; } comm_type t = x->type; if (t == IPC_COMM) ret = ipc_comm_send(x, data, len); else if (t == ZMQ_COMM) ret = zmq_comm_send(x, data, len); else if (t == SERVER_COMM) ret = server_comm_send(x, data, len); else if (t == CLIENT_COMM) ret = client_comm_send(x, data, len); else if (t == ASCII_FILE_COMM) ret = ascii_file_comm_send(x, data, len); else if ((t == ASCII_TABLE_COMM) || (t == ASCII_TABLE_ARRAY_COMM)) ret = ascii_table_comm_send(x, data, len); else { ygglog_error("comm_send_single: Unsupported comm_type %d", t); } if (ret >= 0) { time(x->last_send); /* time_t now; */ /* time(&now); */ /* x->last_send[0] = now; */ } return ret; }; /*! @brief Create header for multipart message. @param[in] x comm_t* structure that header will be sent to. @param[in] data const char * Message to be sent. @param[in] len size_t Size of message body. @returns comm_head_t Header info that should be sent before the message body. 
 */
static comm_head_t comm_send_multipart_header(const comm_t *x,
                                              const char * data,
                                              const size_t len) {
  comm_head_t head = init_header(len, NULL, NULL);
  // Random message id; may be overwritten below for server comms.
  sprintf(head.id, "%d", rand());
  // NOTE(review): strcpy/strcat into head.model assume the buffer is large
  // enough for "<model>_copy<n>" -- confirm against the comm_head_t
  // definition.
  char *model_name = getenv("YGG_MODEL_NAME");
  if (model_name != NULL) {
    strcpy(head.model, model_name);
  }
  char *model_copy = getenv("YGG_MODEL_COPY");
  if (model_copy != NULL) {
    strcat(head.model, "_copy");
    strcat(head.model, model_copy);
  }
  head.flags = head.flags | HEAD_FLAG_VALID | HEAD_FLAG_MULTIPART;
  // Add datatype information to header
  if (!(x->flags & COMM_FLAG_FILE)) {
    dtype_t *datatype;
    if (x->type == CLIENT_COMM) {
      // Client comms advertise the datatype of their request comm.
      comm_t *req_comm = (comm_t*)(x->handle);
      datatype = req_comm->datatype;
    } else {
      datatype = x->datatype;
    }
    head.dtype = datatype;
  }
  // x0 is the comm whose transport actually carries the header.
  const comm_t *x0;
  if (x->type == SERVER_COMM) {
    if (!(is_eof(data))) {
      head = server_response_header(x, head);
    }
    x0 = server_get_comm((requests_t*)(x->info), 0);
    if (x0 == NULL) {
      ygglog_error("comm_send_multipart_header(%s): no response comm registered", x->name);
      head.flags = head.flags & ~HEAD_FLAG_VALID;
      return head;
    }
    // This gives the server access to the ID of the message last received
    strcpy(head.id, x->address);
  } else if (x->type == CLIENT_COMM) {
    if (!(is_eof(data))) {
      head = client_response_header(x, head);
    }
    x0 = (comm_t*)(x->handle);
  } else {
    x0 = x;
  }
  // Get ZMQ header info
  if (x0->type == ZMQ_COMM) {
    char *reply_address = set_reply_send(x0);
    if (reply_address == NULL) {
      ygglog_error("comm_send_multipart_header: Could not set reply address.");
      head.flags = head.flags & ~HEAD_FLAG_VALID;
      return head;
    }
    strcpy(head.zmq_reply, reply_address);
    ygglog_debug("reply_address = %s\n", head.zmq_reply);
  }
  return head;
};

/*!
  @brief Send a large message in multiple parts via a new comm.
  @param[in] x comm_t* Structure that message should be sent to.
  @param[in] data const char * Message that should be sent.
  @param[in] len size_t Size of data.
  @returns: int 0 if send successfull, -1 if send unsuccessful.
*/ static int comm_send_multipart(const comm_t *x, const char *data, const size_t len) { //char headbuf[YGG_MSG_BUF]; size_t headbuf_len = YGG_MSG_BUF; int headlen = 0, ret = -1; comm_t* xmulti = NULL; int no_type = is_eof(data); if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) { ygglog_error("comm_send_multipart: Invalid comm"); return ret; } // Get header comm_head_t head = comm_send_multipart_header(x, data, len); if (!(head.flags & HEAD_FLAG_VALID)) { ygglog_error("comm_send_multipart: Invalid header generated."); return -1; } char *headbuf = (char*)malloc(headbuf_len); if (headbuf == NULL) { ygglog_error("comm_send_multipart: Failed to malloc headbuf."); return -1; } // Try to send body in header if (len < (x->maxMsgSize - x->msgBufSize)) { headlen = format_comm_header(&head, &headbuf, headbuf_len, x->maxMsgSize - x->msgBufSize, no_type); if (headlen < 0) { ygglog_error("comm_send_multipart: Failed to format header."); free(headbuf); return -1; } if (((size_t)headlen + len) < (x->maxMsgSize - x->msgBufSize)) { if (((size_t)headlen + len + 1) > headbuf_len) { char *t_headbuf = (char*)realloc(headbuf, (size_t)headlen + len + 1); if (t_headbuf == NULL) { ygglog_error("comm_send_multipart: Failed to realloc headbuf."); free(headbuf); return -1; } headbuf = t_headbuf; headbuf_len = (size_t)headlen + len + 1; } head.flags = head.flags & ~HEAD_FLAG_MULTIPART; memcpy(headbuf + headlen, data, len); headlen += (int)len; headbuf[headlen] = '\0'; } } // Get head string if (head.flags & HEAD_FLAG_MULTIPART) { // Get address for new comm and add to header xmulti = new_comm(NULL, "send", x->type, NULL); if ((xmulti == NULL) || (!(xmulti->flags & COMM_FLAG_VALID))) { ygglog_error("comm_send_multipart: Failed to initialize a new comm."); free(headbuf); return -1; } xmulti->const_flags[0] = xmulti->const_flags[0] | COMM_EOF_SENT | COMM_EOF_RECV; xmulti->flags = xmulti->flags | COMM_FLAG_WORKER; strcpy(head.address, xmulti->address); if (xmulti->type == ZMQ_COMM) { char 
*reply_address = set_reply_send(xmulti); if (reply_address == NULL) { ygglog_error("comm_send_multipart: Could not set worker reply address."); return -1; } strcpy(head.zmq_reply_worker, reply_address); ygglog_debug("comm_send_multipart: zmq worker reply address is '%s'", head.zmq_reply_worker); } headlen = format_comm_header(&head, &headbuf, headbuf_len, x->maxMsgSize - x->msgBufSize, no_type); if (headlen < 0) { ygglog_error("comm_send_multipart: Failed to format header."); free(headbuf); if (xmulti != NULL) { free_comm(xmulti); } return -1; } } // Send header size_t data_in_header = 0; if ((head.flags & HEAD_TYPE_IN_DATA) && ((size_t)headlen > (x->maxMsgSize - x->msgBufSize))) { ret = comm_send_single(x, headbuf, x->maxMsgSize - x->msgBufSize); data_in_header = headlen - (x->maxMsgSize - x->msgBufSize); } else { ret = comm_send_single(x, headbuf, headlen); } if (ret < 0) { ygglog_error("comm_send_multipart: Failed to send header."); if (xmulti != NULL) { free_comm(xmulti); } free(headbuf); return -1; } if (!(head.flags & HEAD_FLAG_MULTIPART)) { ygglog_debug("comm_send_multipart(%s): %d bytes completed", x->name, head.size); free(headbuf); return ret; } // Send data stored in header size_t msgsiz; size_t prev = headlen - data_in_header; while (prev < (size_t)headlen) { if ((headlen - prev) > (xmulti->maxMsgSize - xmulti->msgBufSize)) { msgsiz = xmulti->maxMsgSize - xmulti->msgBufSize; } else { msgsiz = headlen - prev; } ret = comm_send_single(xmulti, headbuf + prev, msgsiz); if (ret < 0) { ygglog_debug("comm_send_multipart(%s): send of data in header interupted at %d of %d bytes.", x->name, prev - (headlen - data_in_header), data_in_header); break; } prev += msgsiz; ygglog_debug("comm_send_multipart(%s): %d of %d bytes sent from data in header", x->name, prev - (headlen - data_in_header), data_in_header); } head.size = head.size - data_in_header; if (ret < 0) { ygglog_error("comm_send_multipart: Failed to send data from header."); if (xmulti != NULL) { 
free_comm(xmulti); } free(headbuf); return -1; } // Send multipart prev = 0; while (prev < head.size) { if ((head.size - prev) > (xmulti->maxMsgSize - xmulti->msgBufSize)) { msgsiz = xmulti->maxMsgSize - xmulti->msgBufSize; } else { msgsiz = head.size - prev; } ret = comm_send_single(xmulti, data + prev, msgsiz); if (ret < 0) { ygglog_debug("comm_send_multipart(%s): send interupted at %d of %d bytes.", x->name, prev, head.size); break; } prev += msgsiz; ygglog_debug("comm_send_multipart(%s): %d of %d bytes sent", x->name, prev, head.size); } if (ret == 0) ygglog_debug("comm_send_multipart(%s): %d bytes completed", x->name, head.size); // Free multipart if (xmulti != NULL) { free_comm(xmulti); } free(headbuf); if (ret >= 0) x->const_flags[0] = x->const_flags[0] | COMM_FLAGS_USED; return ret; }; /*! @brief Send a message to the comm. Send a message smaller than YGG_MSG_MAX bytes to an output comm. If the message is larger, it will not be sent. @param[in] x comm_t* structure that comm should be sent to. @param[in] data character pointer to message that should be sent. @param[in] len size_t length of message to be sent. @returns int 0 if send succesfull, -1 if send unsuccessful. 
 */
static int comm_send(const comm_t *x, const char *data, const size_t len) {
  int ret = -1;
  if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) {
    ygglog_error("comm_send: Invalid comm");
    return ret;
  }
  if (x->const_flags == NULL) {
    ygglog_error("comm_send(%s): const_flags not initialized.", x->name);
    return ret;
  }
  int sending_eof = 0;
  if (is_eof(data)) {
    if (x->const_flags[0] & COMM_EOF_SENT) {
      // EOF was already sent on this comm; treated as an error (-1).
      ygglog_debug("comm_send(%s): EOF already sent", x->name);
      return ret;
    } else if (!(check_threaded_eof(x))) {
      // Other threads share this comm and have not sent EOF yet: record
      // ours locally but do not emit EOF on the wire.
      ygglog_debug("comm_send(%s): EOF not sent on other threads", x->name);
      set_sent_eof(x);
      return 0;
    } else {
      set_sent_eof(x);
      sending_eof = 1;
      ygglog_debug("comm_send(%s): Sending EOF", x->name);
    }
  }
  // A header is required for oversized messages, for comms that always send
  // headers, and for the first message on a comm (datatype negotiation).
  if (((len > x->maxMsgSize) && (x->maxMsgSize > 0)) ||
      (((x->flags & COMM_ALWAYS_SEND_HEADER) ||
        (!(x->const_flags[0] & COMM_FLAGS_USED))))) {
    ygglog_debug("comm_send(%s): Sending as one or more messages with a header.", x->name);
    ret = comm_send_multipart(x, data, len);
  } else {
    ygglog_debug("comm_send(%s): Sending as single message without a header.", x->name);
    ret = comm_send_single(x, data, len);
  }
  if (sending_eof) {
    ygglog_debug("comm_send(%s): sent EOF, ret = %d", x->name, ret);
  }
  if (ret >= 0)
    x->const_flags[0] = x->const_flags[0] | COMM_FLAGS_USED;
  return ret;
};

/*!
  @brief Send EOF message to the comm.
  @param[in] x comm_t structure that message should be sent to.
  @returns int 0 if send successfull, -1 otherwise.
*/
static int comm_send_eof(const comm_t *x) {
  int ret = -1;
  char buf[100] = YGG_MSG_EOF;
  ret = comm_send(x, buf, strlen(buf));
  return ret;
};

/*!
  @brief Receive a message from an input comm.
  Receive a message smaller than YGG_MSG_MAX bytes from an input comm.
  @param[in] x comm_t* structure that message should be sent to.
  @param[out] data char ** pointer to allocated buffer where the message
  should be saved. This should be a malloc'd buffer if allow_realloc is 1.
  @param[in] len const size_t length of the allocated message buffer in bytes.
@param[in] allow_realloc const int If 1, the buffer will be realloced if it
is not large enough. Otherwise an error will be returned.
@returns int -1 if message could not be received, otherwise the length of
the received message.
 */
static int comm_recv_single(comm_t *x, char **data, const size_t len,
                            const int allow_realloc) {
  int ret = -1;
  if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) {
    ygglog_error("comm_recv_single: Invalid comm");
    return ret;
  }
  // Comms may only be used by the thread that initialized them.
  if (!(thread_can_use(x->thread_id))) {
    ygglog_error("comm_recv_single: Thread is attempting to use a comm it did not initialize");
    return ret;
  }
  // Dispatch on the concrete transport type.
  comm_type t = x->type;
  if (t == IPC_COMM)
    ret = ipc_comm_recv(x, data, len, allow_realloc);
  else if (t == ZMQ_COMM)
    ret = zmq_comm_recv(x, data, len, allow_realloc);
  else if (t == SERVER_COMM)
    ret = server_comm_recv(x, data, len, allow_realloc);
  else if (t == CLIENT_COMM)
    ret = client_comm_recv(x, data, len, allow_realloc);
  else if (t == ASCII_FILE_COMM)
    ret = ascii_file_comm_recv(x, data, len, allow_realloc);
  else if ((t == ASCII_TABLE_COMM) || (t == ASCII_TABLE_ARRAY_COMM))
    ret = ascii_table_comm_recv(x, data, len, allow_realloc);
  else {
    // BUG FIX: the message previously said "comm_recv", misattributing the
    // failure to the wrapper rather than this function.
    ygglog_error("comm_recv_single: Unsupported comm_type %d", t);
  }
  return ret;
};

/*!
  @brief Receive a message in multiple parts.
  @param[in] x comm_t* Comm that message should be received from.
  @param[in] data char ** Pointer to buffer where message should be stored.
  @param[in] len size_t Size of data buffer.
  @param[in] headlen size_t Size of header in data buffer.
  @param[in] allow_realloc int If 1, data will be realloced if the incoming
  message is larger than the buffer. Otherwise, an error will be returned.
  @returns int -1 if unsuccessful, size of message received otherwise.
*/ static int comm_recv_multipart(comm_t *x, char **data, const size_t len, const size_t headlen, const int allow_realloc) { int ret = -1; if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) { ygglog_error("comm_recv_multipart: Invalid comm"); return ret; } usleep(100); comm_head_t head = parse_comm_header(*data, headlen); if (!(head.flags & HEAD_FLAG_VALID)) { ygglog_error("comm_recv_multipart(%s): Error parsing header.", x->name); ret = -1; } else { // Move body to front of data and return if EOF memmove(*data, *data + head.bodybeg, head.bodysiz); (*data)[head.bodysiz] = '\0'; if (is_eof(*data)) { ygglog_debug("comm_recv_multipart(%s): EOF received.", x->name); x->const_flags[0] = x->const_flags[0] | COMM_EOF_RECV; destroy_header(&head); return -2; } // Get datatype information from header on first recv dtype_t *updtype = NULL; if (x->type == SERVER_COMM) { comm_t *handle = (comm_t*)(x->handle); updtype = handle->datatype; } else { updtype = x->datatype; } if (updtype == NULL) { ygglog_error("comm_recv_multipart(%s): Datatype is NULL.", x->name); destroy_header(&head); return -1; } if ((!(x->const_flags[0] & COMM_FLAGS_USED)) && (!(x->flags & COMM_FLAG_FILE)) && (updtype->obj == NULL) && (!(head.flags & HEAD_TYPE_IN_DATA))) { ygglog_debug("comm_recv_multipart(%s): Updating datatype to '%s'", x->name, head.dtype->type); ret = update_dtype(updtype, head.dtype); if (ret != 0) { ygglog_error("comm_recv_multipart(%s): Error updating datatype.", x->name); destroy_header(&head); return -1; } } else if ((!(x->flags & COMM_FLAG_FILE)) && (head.dtype != NULL)) { ygglog_debug("comm_recv_multipart(%s): Updating existing datatype to '%s' from '%s'", x->name, head.dtype->type, updtype->type); ret = update_dtype(updtype, head.dtype); if (ret != 0) { ygglog_error("comm_recv_multipart(%s): Error updating existing datatype.", x->name); destroy_header(&head); return -1; } } if (head.flags & HEAD_FLAG_MULTIPART) { ygglog_debug("comm_recv_multipart(%s): Message is multipart", 
x->name); // Return early if header contained entire message if (head.size == head.bodysiz) { x->const_flags[0] = x->const_flags[0] | COMM_FLAGS_USED; destroy_header(&head); return (int)(head.bodysiz); } // Get address for new comm comm_t* xmulti = new_comm(head.address, "recv", x->type, NULL); if ((xmulti == NULL) || (!(xmulti->flags & COMM_FLAG_VALID))) { ygglog_error("comm_recv_multipart: Failed to initialize a new comm."); destroy_header(&head); return -1; } xmulti->const_flags[0] = xmulti->const_flags[0] | COMM_EOF_SENT | COMM_EOF_RECV; xmulti->flags = xmulti->flags | COMM_FLAG_WORKER; if (xmulti->type == ZMQ_COMM) { int reply_socket = set_reply_recv(xmulti, head.zmq_reply_worker); if (reply_socket < 0) { ygglog_error("comm_recv_multipart: Failed to set worker reply address."); destroy_header(&head); return -1; } } // Receive parts of message size_t prev = head.bodysiz; size_t msgsiz = 0; // Reallocate data if necessary if ((head.size + 1) > len) { if (allow_realloc) { char *t_data = (char*)realloc(*data, head.size + 1); if (t_data == NULL) { ygglog_error("comm_recv_multipart(%s): Failed to realloc buffer", x->name); free(*data); free_comm(xmulti); destroy_header(&head); return -1; } *data = t_data; } else { ygglog_error("comm_recv_multipart(%s): buffer is not large enough", x->name); free_comm(xmulti); destroy_header(&head); return -1; } } ret = -1; char *pos = (*data) + prev; while (prev < head.size) { msgsiz = head.size - prev + 1; ret = comm_recv_single(xmulti, &pos, msgsiz, 0); if (ret < 0) { ygglog_debug("comm_recv_multipart(%s): recv interupted at %d of %d bytes.", x->name, prev, head.size); break; } prev += ret; pos += ret; ygglog_debug("comm_recv_multipart(%s): %d of %d bytes received", x->name, prev, head.size); } if ((ret > 0) && (head.flags & HEAD_TYPE_IN_DATA)) { ygglog_debug("comm_recv_multipart(%s): Extracting type from data."); ret = parse_type_in_data(data, prev, &head); if (ret > 0) { prev = ret; ret = update_dtype(updtype, head.dtype); if 
(ret != 0) { ygglog_error("comm_recv_multipart(%s): Error updating existing datatype.", x->name); destroy_header(&head); return -1; } else { ret = (int)prev; } } } if (ret > 0) { ygglog_debug("comm_recv_multipart(%s): %d bytes completed", x->name, prev); ret = (int)prev; } free_comm(xmulti); } else { ygglog_debug("comm_recv_multipart(%s): Message not multipart", x->name); ret = (int)(head.bodysiz); } } if (ret >= 0) x->const_flags[0] = x->const_flags[0] | COMM_FLAGS_USED; destroy_header(&head); return ret; }; /*! @brief Receive a message from an input comm. An error will be returned if the buffer is not large enough. @param[in] x comm_t* structure that message should be sent to. @param[out] data character pointer to allocated buffer where the message should be saved. @param[in] len const size_t length of the allocated message buffer in bytes. @returns int -1 if message could not be received and -2 if EOF is received. Length of the received message otherwise. */ static int comm_recv(comm_t *x, char *data, const size_t len) { int ret = comm_recv_single(x, &data, len, 0); if (ret > 0) { if (is_eof(data)) { ygglog_debug("comm_recv(%s): EOF received.", x->name); x->const_flags[0] = x->const_flags[0] | COMM_EOF_RECV; ret = -2; } else { ret = comm_recv_multipart(x, &data, len, ret, 0); } } else { ygglog_error("comm_recv(%s): Failed to receive header or message.", x->name); } return ret; }; /*! @brief Receive a message from an input comm, reallocating as necessary. @param[in] x comm_t* structure that message should be sent to. @param[out] data character pointer to pointer to allocated buffer where the message should be saved. @param[in] len const size_t length of the allocated message buffer in bytes. @returns int -1 if message could not be received and -2 if EOF is received. Length of the received message otherwise. 
*/
static int comm_recv_realloc(comm_t *x, char **data, const size_t len) {
  // Growable receive: *data may be realloc'd (allow_realloc = 1), so it must
  // point at a malloc'd buffer.
  int ret = comm_recv_single(x, data, len, 1);
  if (ret > 0) {
    if (is_eof(*data)) {
      ygglog_debug("comm_recv_realloc(%s): EOF received.", x->name);
      x->const_flags[0] = x->const_flags[0] | COMM_EOF_RECV;
      ret = -2;
    } else {
      ret = comm_recv_multipart(x, data, len, ret, 1);
    }
  } else {
    ygglog_error("comm_recv_realloc(%s): Failed to receive header or message.",
                 x->name);
  }
  return ret;
};

/*! @brief alias for comm_send. */
static int comm_send_nolimit(const comm_t *x, const char *data, const size_t len) {
  return comm_send(x, data, len);
};

/*!
  @brief Send EOF message to the comm.
  @param[in] x comm_t* structure that message should be sent to.
  @returns int 0 if send successfull, -1 otherwise.
 */
static int comm_send_nolimit_eof(const comm_t *x) {
  int ret = -1;
  if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) {
    ygglog_error("comm_send_nolimit_eof: Invalid comm");
    return ret;
  }
  if (x->const_flags == NULL) {
    ygglog_error("comm_send_nolimit_eof(%s): const_flags not initialized.",
                 x->name);
    return ret;
  }
  // Only send EOF once per comm; subsequent calls are no-ops returning -1.
  if (!(x->const_flags[0] & COMM_EOF_SENT)) {
    char buf[2048] = YGG_MSG_EOF;
    ret = comm_send_nolimit(x, buf, strlen(buf));
    set_sent_eof(x);
  } else {
    ygglog_debug("comm_send_nolimit_eof(%s): EOF already sent", x->name);
  }
  return ret;
};

/*!
  @brief Receive a large message from an input comm.
  Receive a message larger than YGG_MSG_MAX bytes from an input comm by
  receiving it in parts. This expects the first message to be the size of
  the total message.
  @param[in] x comm_t structure that message should be sent to.
  @param[out] data character pointer to pointer for allocated buffer where
  the message should be stored. A pointer to a pointer is used so that the
  buffer may be reallocated as necessary for the incoming message.
  @param[in] len size_t length of the initial allocated message buffer in bytes.
  @returns int -1 if message could not be received and -2 if EOF is received.
  Length of the received message otherwise.
*/ static int comm_recv_nolimit(comm_t *x, char **data, const size_t len) { return comm_recv_realloc(x, data, len); }; /*! @brief Send arguments as a small formatted message to an output comm. Use the format string to create a message from the input arguments that is then sent to the specified output comm. If the message is larger than YGG_MSG_MAX or cannot be encoded, it will not be sent. @param[in] x comm_t* structure for comm that message should be sent to. @param[in] nargs size_t Number of arguments in the variable argument list. @param[in] ap va_list arguments to be formatted into a message using sprintf. @returns int Number of arguments formatted if send succesfull, -1 if send unsuccessful. */ static int vcommSend(const comm_t *x, size_t nargs, va_list_t ap) { ygglog_debug("vcommSend: Formatting %lu arguments.", nargs); int ret = -1; if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) { ygglog_error("vcommSend: Invalid comm"); return ret; } size_t buf_siz = YGG_MSG_BUF; // char *buf = NULL; char *buf = (char*)malloc(buf_siz); if (buf == NULL) { ygglog_error("vcommSend(%s): Failed to alloc buffer", x->name); return -1; } dtype_t *datatype = x->datatype; if (x->type == CLIENT_COMM) { comm_t *handle = (comm_t*)(x->handle); datatype = handle->datatype; } // Update datatype if not yet set and object being sent includes type if (update_dtype_from_generic_ap(datatype, nargs, ap) < 0) { return -1; } size_t nargs_orig = nargs; ret = serialize_dtype(datatype, &buf, &buf_siz, 1, &nargs, ap); if (ret < 0) { ygglog_error("vcommSend(%s): serialization error", x->name); free(buf); return -1; } ret = comm_send(x, buf, ret); ygglog_debug("vcommSend(%s): comm_send returns %d, nargs (remaining) = %d", x->name, ret, nargs); free(buf); if (ret < 0) { return ret; } else { return (int)(nargs_orig - nargs); } }; /*! @brief Send arguments as a formatted message to an output comm. 
Use the format string to create a message from the input arguments that is
then sent to the specified output comm.
@param[in] x comm_t structure for comm that message should be sent to.
@param[in] nargs size_t Number of variable arguments provided.
@param[in] ... Arguments to be formatted into a message using sprintf.
@returns int Number of arguments formatted if send succesfull, -1 if send
unsuccessful.
 */
static int ncommSend(const comm_t *x, size_t nargs, ...) {
  // Variadic front-end: packs the caller's arguments into a va_list_t and
  // forwards them to vcommSend.
  va_list_t ap = init_va_list();
  va_start(ap.va, nargs);
  ygglog_debug("ncommSend: nargs = %d", nargs);
  int ret = vcommSend(x, nargs, ap);
  va_end(ap.va);
  return ret;
};
// Convenience wrapper that counts the variadic arguments automatically.
#define commSend(x, ...) ncommSend(x, COUNT_VARARGS(__VA_ARGS__), __VA_ARGS__)

/*!
  @brief Assign arguments by receiving and parsing a message from an input comm.
  Receive a message smaller than YGG_MSG_MAX bytes from an input comm and
  parse it using the associated format string.
  @param[in] x comm_t structure for comm that message should be sent to.
  @param[in] allow_realloc int If 1, variables being filled are assumed to be
  pointers to pointers for heap memory. If 0, variables are assumed to be
  pointers to stack memory. If allow_realloc is set to 1, but stack variables
  are passed, a segfault can occur.
  @param[in] nargs size_t Number of arguments in the variable argument list.
  @param[out] ap va_list arguments that should be assigned by parsing the
  received message using sscanf. As these are being assigned, they should be
  pointers to memory that has already been allocated.
  @returns int -1 if message could not be received or could not be parsed.
  Length of the received message if message was received and parsed. -2 is
  returned if EOF is received.
*/
static int vcommRecv(comm_t *x, const int allow_realloc, size_t nargs,
                     va_list_t ap) {
  int ret = -1;
  ygglog_debug("vcommRecv: Parsing %lu arguments.", nargs);
  if ((x == NULL) || (!(x->flags & COMM_FLAG_VALID))) {
    ygglog_error("vcommRecv: Invalid comm");
    return ret;
  }
  // Receive message
  size_t buf_siz = YGG_MSG_BUF;
  /* char *buf = NULL; */
  char *buf = (char*)malloc(buf_siz);
  if (buf == NULL) {
    ygglog_error("vcommRecv(%s): Failed to alloc buffer", x->name);
    return -1;
  }
  // comm_recv_nolimit may realloc buf; on failure ret is negative (-2 = EOF)
  // and is propagated unchanged to the caller.
  ret = comm_recv_nolimit(x, &buf, buf_siz);
  if (ret < 0) {
    // ygglog_error("vcommRecv(%s): Error receiving.", x->name);
    free(buf);
    return ret;
  }
  ygglog_debug("vcommRecv(%s): comm_recv returns %d: %.10s...",
               x->name, ret, buf);
  // Deserialize message. Server comms parse with the datatype of the
  // wrapped handle comm.
  dtype_t *datatype = x->datatype;
  if (x->type == SERVER_COMM) {
    comm_t *handle = (comm_t*)(x->handle);
    datatype = handle->datatype;
  }
  ret = deserialize_dtype(datatype, buf, ret, allow_realloc, &nargs, ap);
  if (ret < 0) {
    ygglog_error("vcommRecv(%s): error deserializing message (ret=%d)",
                 x->name, ret);
    free(buf);
    return -1;
  }
  ygglog_debug("vcommRecv(%s): deserialize_format returns %d", x->name, ret);
  free(buf);
  return ret;
};

/*!
  @brief Assign arguments by receiving and parsing a message from an input comm.
  Receive a message from an input comm and parse it using the associated type.
  @param[in] x comm_t* structure for comm that message should be sent to.
  @param[in] allow_realloc int If 1, variables being filled are assumed to be
  pointers to pointers for heap memory. If 0, variables are assumed to be
  pointers to stack memory. If allow_realloc is set to 1, but stack variables
  are passed, a segfault can occur.
  @param[in] nargs size_t Number of variable arguments provided.
  @param[out] ... arguments that should be assigned by parsing the received
  message using sscanf. As these are being assigned, they should be pointers
  to memory that has already been allocated.
  @returns int -1 if message could not be received or could not be parsed.
Length of the received message if message was received and parsed. -2 is
returned if EOF is received.
 */
static int ncommRecv(comm_t *x, const int allow_realloc, size_t nargs, ...) {
  // Variadic front-end for vcommRecv.
  va_list_t ap = init_va_list();
  va_start(ap.va, nargs);
  ygglog_debug("ncommRecv: nargs = %d", nargs);
  int ret = vcommRecv(x, allow_realloc, nargs, ap);
  va_end(ap.va);
  return ret;
};
// Receive into caller-owned stack buffers (no reallocation).
#define commRecvStack(x, ...) ncommRecv(x, 0, COUNT_VARARGS(__VA_ARGS__), __VA_ARGS__)
// Receive into heap buffers that may be realloc'd to fit the message.
#define commRecvHeap(x, ...) ncommRecv(x, 1, COUNT_VARARGS(__VA_ARGS__), __VA_ARGS__)
#define commRecv commRecvStack
#define commRecvRealloc commRecvHeap

// The *_nolimit variants share the same implementation as the base calls.
#define vcommSend_nolimit vcommSend
#define vcommRecv_nolimit vcommRecv

#ifdef __cplusplus /* If this is a C++ compiler, end C linkage */
}
#endif

#endif /*YGGCOMMUNICATION_H_*/
common.h
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_

#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>

#include <cstdio>
#include <string>
#include <vector>
#include <sstream>
#include <cstdint>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <iterator>
#include <type_traits>
#include <iomanip>

#ifdef _MSC_VER
#include "intrin.h"
#endif

namespace LightGBM {

namespace Common {

// ASCII-only lowercase conversion (avoids locale-dependent std::tolower).
inline static char tolower(char in) {
  if (in <= 'Z' && in >= 'A')
    return in - ('Z' - 'z');  // 'Z' - 'z' is negative, so this adds 32
  return in;
}

// Strip leading and trailing whitespace (space, \f, \n, \r, \t, \v).
inline static std::string Trim(std::string str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
  str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
  return str;
}

// Strip single and double quotation marks from both ends of the string.
inline static std::string RemoveQuotationSymbol(std::string str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of("'\"") + 1);
  str.erase(0, str.find_first_not_of("'\""));
  return str;
}

// True when str begins with prefix.
inline static bool StartsWith(const std::string& str, const std::string prefix) {
  if (str.substr(0, prefix.size()) == prefix) {
    return true;
  } else {
    return false;
  }
}

// Split on a single delimiter character; empty fields are dropped.
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    if (str[pos] == delimiter) {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      ++pos;
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Split text into lines on \n / \r (handles \r\n); empty lines are dropped.
inline static std::vector<std::string> SplitLines(const char* c_str) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    if (str[pos] == '\n' || str[pos] == '\r') {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      // skip the line endings
      while (str[pos] == '\n' || str[pos] == '\r') ++pos;
      // new begin
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Split on any of several delimiter characters (definition continues below).
inline static
std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    bool met_delimiters = false;
    for (int j = 0; delimiters[j] != '\0'; ++j) {
      if (str[pos] == delimiters[j]) {
        met_delimiters = true;
        break;
      }
    }
    if (met_delimiters) {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      ++pos;
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Parse a (possibly signed) integer from p into *out; returns a pointer just
// past the parsed token with trailing spaces skipped. No overflow checking.
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
  int sign;
  T value;
  while (*p == ' ') {
    ++p;
  }
  sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  for (value = 0; *p >= '0' && *p <= '9'; ++p) {
    value = value * 10 + (*p - '0');
  }
  *out = static_cast<T>(sign * value);
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Integer exponentiation by squaring/cubing; negative powers return 1/x^n.
template<typename T>
inline static double Pow(T base, int power) {
  if (power < 0) {
    return 1.0 / Pow(base, -power);
  } else if (power == 0) {
    return 1;
  } else if (power % 2 == 0) {
    return Pow(base*base, power / 2);
  } else if (power % 3 == 0) {
    return Pow(base*base*base, power / 3);
  } else {
    return base * Pow(base, power - 1);
  }
}

// Locale-independent string-to-double; also recognizes na/nan/null and
// inf/infinity tokens. Returns a pointer just past the parsed token.
inline static const char* Atof(const char* p, double* out) {
  int frac;
  double sign, value, scale;
  *out = NAN;
  // Skip leading white space, if any.
  while (*p == ' ') {
    ++p;
  }
  // Get sign, if any.
  sign = 1.0;
  if (*p == '-') {
    sign = -1.0;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  // is a number
  if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
    // Get digits before decimal point or exponent, if any.
    for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
      value = value * 10.0 + (*p - '0');
    }
    // Get digits after decimal point, if any.
    if (*p == '.') {
      double right = 0.0;
      int nn = 0;
      ++p;
      while (*p >= '0' && *p <= '9') {
        right = (*p - '0') + right * 10.0;
        ++nn;
        ++p;
      }
      value += right / Pow(10.0, nn);
    }
    // Handle exponent, if any.
    frac = 0;
    scale = 1.0;
    if ((*p == 'e') || (*p == 'E')) {
      uint32_t expon;
      // Get sign of exponent, if any.
      ++p;
      if (*p == '-') {
        frac = 1;
        ++p;
      } else if (*p == '+') {
        ++p;
      }
      // Get digits of exponent, if any.
      for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
        expon = expon * 10 + (*p - '0');
      }
      if (expon > 308) expon = 308;  // clamp to the double exponent range
      // Calculate scaling factor.
      while (expon >= 50) { scale *= 1E50; expon -= 50; }
      while (expon >= 8) { scale *= 1E8; expon -= 8; }
      while (expon > 0) { scale *= 10.0; expon -= 1; }
    }
    // Return signed and scaled floating point result.
    *out = sign * (frac ? (value / scale) : (value * scale));
  } else {
    // Not a numeric literal: scan the token and accept NA/NaN/null/inf
    // spellings (case-insensitive); anything else is a fatal parse error.
    size_t cnt = 0;
    while (*(p + cnt) != '\0' && *(p + cnt) != ' '
           && *(p + cnt) != '\t' && *(p + cnt) != ','
           && *(p + cnt) != '\n' && *(p + cnt) != '\r'
           && *(p + cnt) != ':') {
      ++cnt;
    }
    if (cnt > 0) {
      std::string tmp_str(p, cnt);
      std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(),
                     Common::tolower);
      if (tmp_str == std::string("na") || tmp_str == std::string("nan")
          || tmp_str == std::string("null")) {
        *out = NAN;
      } else if (tmp_str == std::string("inf")
                 || tmp_str == std::string("infinity")) {
        *out = sign * 1e308;
      } else {
        Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
      }
      p += cnt;
    }
  }
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Parse an int and require the whole string to be consumed.
inline static bool AtoiAndCheck(const char* p, int* out) {
  const char* after = Atoi(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Parse a double and require the whole string to be consumed.
inline static bool AtofAndCheck(const char* p, double* out) {
  const char* after = Atof(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Number of decimal digits needed to print n (1..10).
inline static unsigned CountDecimalDigit32(uint32_t n) {
#if defined(_MSC_VER) || defined(__GNUC__)
  static const uint32_t powers_of_10[] = {
    0, 10, 100, 1000, 10000, 100000, 1000000,
    10000000, 100000000, 1000000000
  };
#ifdef _MSC_VER
  unsigned long i = 0;
  _BitScanReverse(&i, n | 1);
  // log10 approximation: floor(log2(n) * 1233 / 4096)
  uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
  uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
  return t - (n < powers_of_10[t]) +
         1;
#else
  // Portable fallback: linear comparison ladder.
  if (n < 10) return 1;
  if (n < 100) return 2;
  if (n < 1000) return 3;
  if (n < 10000) return 4;
  if (n < 100000) return 5;
  if (n < 1000000) return 6;
  if (n < 10000000) return 7;
  if (n < 100000000) return 8;
  if (n < 1000000000) return 9;
  return 10;
#endif
}

// Fast unsigned-to-decimal conversion using a two-digit lookup table.
inline static void Uint32ToStr(uint32_t value, char* buffer) {
  const char kDigitsLut[200] = {
    '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9',
    '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9',
    '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9',
    '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9',
    '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9',
    '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9',
    '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9',
    '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9',
    '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9',
    '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9'
  };
  unsigned digit = CountDecimalDigit32(value);
  buffer += digit;
  *buffer = '\0';  // digits are written backwards from the terminator
  while (value >= 100) {
    const unsigned i = (value % 100) << 1;
    value /= 100;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
  if (value < 10) {
    *--buffer = char(value) + '0';
  } else {
    const unsigned i = value << 1;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
}

// Signed wrapper around Uint32ToStr.
inline static void Int32ToStr(int32_t value, char* buffer) {
  uint32_t u = static_cast<uint32_t>(value);
  if (value < 0) {
    *buffer++ = '-';
    u = ~u + 1;  // two's-complement negate in unsigned space (handles INT32_MIN)
  }
  Uint32ToStr(u, buffer);
}

// Print a double with full round-trip precision (%.17g).
inline static void DoubleToStr(double value, char* buffer, size_t
#ifdef _MSC_VER
  buffer_len
#endif
  ) {
#ifdef _MSC_VER
  sprintf_s(buffer, buffer_len, "%.17g", value);
#else
  sprintf(buffer, "%.17g", value);
#endif
}

// Advance past spaces and tabs (definition continues below).
inline static const char* SkipSpaceAndTab(const char* p) {
  while (*p ==
         ' ' || *p == '\t') {
    ++p;
  }
  return p;
}

// Advance past line endings (and spaces).
inline static const char* SkipReturn(const char* p) {
  while (*p == '\n' || *p == '\r' || *p == ' ') {
    ++p;
  }
  return p;
}

// Element-wise static_cast of a vector from T to T2.
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
  std::vector<T2> ret(arr.size());
  for (size_t i = 0; i < arr.size(); ++i) {
    ret[i] = static_cast<T2>(arr[i]);
  }
  return ret;
}

// Formatting helper dispatched on float/unsigned-ness of T.
// Primary template: signed integer formatting.
template<typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast {
  void operator()(T value, char* buffer, size_t ) const {
    Int32ToStr(value, buffer);
  }
};

// Floating-point specialization: %g formatting.
template<typename T>
struct __TToStringHelperFast<T, true, false> {
  void operator()(T value, char* buffer, size_t
#ifdef _MSC_VER
    buf_len
#endif
    ) const {
#ifdef _MSC_VER
    sprintf_s(buffer, buf_len, "%g", value);
#else
    sprintf(buffer, "%g", value);
#endif
  }
};

// Unsigned-integer specialization.
template<typename T>
struct __TToStringHelperFast<T, false, true> {
  void operator()(T value, char* buffer, size_t ) const {
    Uint32ToStr(value, buffer);
  }
};

// Join the first n elements of arr with spaces using the fast formatters.
template<typename T>
inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  __TToStringHelperFast<T, std::is_floating_point<T>::value,
                        std::is_unsigned<T>::value> helper;
  const size_t buf_len = 16;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  helper(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    helper(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}

// Join the first n doubles with spaces at full (%.17g) precision.
inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  const size_t buf_len = 32;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  DoubleToStr(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    DoubleToStr(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}
// Conversion helper dispatched on float-ness of T. Primary template:
// integer parsing via Atoi.
template<typename T, bool is_float>
struct __StringToTHelper {
  T operator()(const std::string& str) const {
    T ret = 0;
    Atoi(str.c_str(), &ret);
    return ret;
  }
};

// Floating-point conversion via std::stod.
template<typename T>
struct __StringToTHelper<T, true> {
  T operator()(const std::string& str) const {
    return static_cast<T>(std::stod(str));
  }
};

// Split str on delimiter and convert each field to T.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
  std::vector<std::string> strs = Split(str.c_str(), delimiter);
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// Split str on spaces into exactly n values (CHECK-fails on a count mismatch).
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  std::vector<std::string> strs = Split(str.c_str(), ' ');
  CHECK(strs.size() == static_cast<size_t>(n));
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// Pointer-based conversion helpers that avoid std::string temporaries.
template<typename T, bool is_float>
struct __StringToTHelperFast {
  const char* operator()(const char*p, T* out) const {
    return Atoi(p, out);
  }
};

template<typename T>
struct __StringToTHelperFast<T, true> {
  const char* operator()(const char*p, T* out) const {
    double tmp = 0.0f;
    auto ret = Atof(p, &tmp);
    *out= static_cast<T>(tmp);
    return ret;
  }
};

// Parse exactly n space-separated values without allocating substrings.
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  auto p_str = str.c_str();
  __StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
  std::vector<T> ret(n);
  for (int i = 0; i < n; ++i) {
    p_str = helper(p_str, &ret[i]);
  }
  return ret;
}

// Join all elements with a delimiter at high precision
// (definition continues below).
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf <<
          std::setprecision(std::numeric_limits<double>::digits10 + 2);
  // NOTE(review): relies on <limits> being available transitively — confirm.
  str_buf << strs[0];
  for (size_t i = 1; i < strs.size(); ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// Join elements in the half-open range [start, end) with a delimiter.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  if (end - start <= 0) {
    return std::string("");
  }
  start = std::min(start, static_cast<size_t>(strs.size()) - 1);
  end = std::min(end, static_cast<size_t>(strs.size()));
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// Smallest power of two >= x; returns 0 if none fits in int64_t.
inline static int64_t Pow2RoundUp(int64_t x) {
  int64_t t = 1;
  for (int i = 0; i < 64; ++i) {
    if (t >= x) {
      return t;
    }
    t <<= 1;
  }
  return 0;
}

/*!
 * \brief Do inplace softmax transformaton on p_rec
 * \param p_rec The input/output vector of the values.
 */
inline static void Softmax(std::vector<double>* p_rec) {
  std::vector<double> &rec = *p_rec;
  // Subtract the max before exponentiating for numerical stability.
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0f;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= static_cast<double>(wsum);
  }
}

// Out-of-place softmax over a buffer of length len (max-shifted).
inline static void Softmax(const double* input, double* output, int len) {
  double wmax = input[0];
  for (int i = 1; i < len; ++i) {
    wmax = std::max(input[i], wmax);
  }
  double wsum = 0.0f;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - wmax);
    wsum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= static_cast<double>(wsum);
  }
}

// View a vector of unique_ptr<T> as a vector of raw const pointers
// (does not transfer ownership).
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> ret;
  for (size_t i = 0; i < input.size(); ++i) {
    ret.push_back(input.at(i).get());
  }
  return ret;
}

// Stable co-sort of keys/values from index `start` onward
// (definition continues below).
template<typename T1, typename T2>
inline
static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; for (size_t i = start; i < keys.size(); ++i) { arr.emplace_back(keys[i], values[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { keys[i] = arr[i].first; values[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) { std::vector<T*> ptr(data.size()); for (size_t i = 0; i < data.size(); ++i) { ptr[i] = data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (x >= 1e300) { return 1e300; } else if(x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); 
num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static,1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static,1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight 
requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != '\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
collatzconjecture.c
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>

/*
 * Compute Collatz (3n+1) trajectories for every starting value 1..Nmax-1 in
 * parallel and report the highest intermediate value reached plus the runtime.
 *
 * Usage: ./collatzconjecture Nmax
 */
int main (int argc, char* argv[])
{
    /* FIX: the original dereferenced argv[1] without checking argc, which is
     * undefined behavior (typically a crash) when no argument is supplied. */
    if (argc < 2) {
        fprintf(stderr, "Usage: %s Nmax\n", argv[0]);
        return 1;
    }
    long long Nmax = atoll(argv[1]);
    if (Nmax < 1) {
        fprintf(stderr, "Nmax must be a positive integer\n");
        return 1;
    }

    long long Imax = Nmax;   /* safety cap on iterations per trajectory */
    long long high = 0;      /* largest value seen in any trajectory */

    double startTime = omp_get_wtime();

    /* Trajectories are independent; `high` is combined with a max reduction.
     * Loop variables are declared in-scope, so no private() clause is needed. */
    #pragma omp parallel for schedule(static, 500) reduction(max:high)
    for (long long j = 1; j < Nmax; j++) {
        long long n = j;
        for (long long i = 1; i < Imax; i++) {
            if (n % 2 == 0) {
                n = n / 2;
            } else {
                /* NOTE(review): 3n+1 can overflow long long for extreme n;
                 * assumed acceptable for the intended input sizes. */
                n = 3 * n + 1;
            }
            if (n > high) {
                high = n;
            }
            if (n == 1) {
                break;
            }
        }
    }

    double endTime = omp_get_wtime();

    printf("High: %lld\n", high);
    printf("Runtime: %.16f\n", endTime - startTime);
    return 0;
}
residualbased_elimination_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER ) #define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER /* System includes */ #include <set> #include <unordered_set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedEliminationBuilderAndSolver * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. 
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedEliminationBuilderAndSolver : public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolver); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor. (with parameters) */ explicit ResidualBasedEliminationBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver, Parameters ThisParameters ) : BaseType(pNewLinearSystemSolver) { // Validate default parameters Parameters default_parameters = Parameters(R"( { "name" : "ResidualBasedEliminationBuilderAndSolver" })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); } /** * @brief Constructor. 
*/ explicit ResidualBasedEliminationBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BaseType(pNewLinearSystemSolver) { // KRATOS_INFO("ResidualBasedEliminationBuilderAndSolver") << "Using the standard builder and solver " << std::endl; } /** Destructor. */ ~ResidualBasedEliminationBuilderAndSolver() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Function to perform the build of the RHS. The vector could be sized as the total number * of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param b The RHS vector */ void Build( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& b) override { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //getting the elements from the model const int nelements = static_cast<int>(rModelPart.Elements().size()); //getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; const double start_build = OpenMPUtils::GetCurrentTime(); // assemble all elements #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId ) { #pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; k++) { ModelPart::ElementsContainerType::iterator it = el_begin + k; 
//detect if the element is active or not. If the user did not make any choice the element //is active by default bool element_is_active = true; if ((it)->IsDefined(ACTIVE)) element_is_active = (it)->Is(ACTIVE); if (element_is_active) { //calculate elemental contribution pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mLockArray); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif // clean local elemental memory pScheme->CleanMemory(*(it.base())); } } #pragma omp for schedule(guided, 512) for (int k = 0; k < nconditions; k++) { ModelPart::ConditionsContainerType::iterator it = cond_begin + k; //detect if the element is active or not. If the user did not make any choice the element //is active by default bool condition_is_active = true; if ((it)->IsDefined(ACTIVE)) condition_is_active = (it)->Is(ACTIVE); if (condition_is_active) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mLockArray); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif // clean local elemental memory pScheme->CleanMemory(*(it.base())); } } } const double stop_build = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System build time: " << stop_build - start_build << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished building" << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building of the LHS * 
@details Depending on the implementation choosen the size of the matrix could * be equal to the total number of Dofs or to the number of unrestrained dofs * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix */ void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A) override { KRATOS_TRY //getting the elements from the model ElementsArrayType& rElements = rModelPart.Elements(); //getting the array of the conditions ConditionsArrayType& rConditions = rModelPart.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); // assemble all elements for (typename ElementsArrayType::ptr_iterator it = rElements.ptr_begin(); it != rElements.ptr_end(); ++it) { //calculate elemental contribution pScheme->Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A, LHS_Contribution, EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0, 0, false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it = rConditions.ptr_begin(); it != rConditions.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A, LHS_Contribution, EquationId); } KRATOS_CATCH("") } /** * @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom * and "N" is the total number of degrees of freedom involved. 
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed * degrees of freedom (but keeping the columns!!) * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix */ void BuildLHS_CompleteOnFreeRows( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A) override { KRATOS_TRY //getting the elements from the model ElementsArrayType& rElements = rModelPart.Elements(); //getting the array of the conditions ConditionsArrayType& rConditions = rModelPart.Conditions(); ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //resetting to zero the vector of reactions TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it = rElements.ptr_begin(); it != rElements.ptr_end(); ++it) { //calculate elemental contribution pScheme->Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleLHS_CompleteOnFreeRows(A, LHS_Contribution, EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0, 0, false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it = rConditions.ptr_begin(); it != rConditions.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleLHS_CompleteOnFreeRows(A, LHS_Contribution, EquationId); } KRATOS_CATCH("") } /** * @brief This is a call to the linear system solver * @param A The LHS matrix * @param Dx The Unknowns vector * @param b 
The RHS vector */ void SystemSolve( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) override { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else TSparseSpace::SetToZero(Dx); // Prints informations about the current time KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector * @param rModelPart The model part of the problem to solve */ void SystemSolveWithPhysics( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, ModelPart& rModelPart ) { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //provide physical data as needed if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() ) BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart); //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else { TSparseSpace::SetToZero(Dx); KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl; } // Prints informations about the current time KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building and solving phase at the same time. 
* @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY Timer::Start("Build"); Build(pScheme, rModelPart, A, b); Timer::Stop("Build"); // ApplyPointLoads(pScheme,rModelPart,b); // Does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; const double start_solve = OpenMPUtils::GetCurrentTime(); Timer::Start("Solve"); SystemSolveWithPhysics(A, Dx, b, rModelPart); Timer::Stop("Solve"); const double stop_solve = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; KRATOS_CATCH("") } /** * @brief Corresponds to the previews, but the System's matrix is considered already built and only the RHS is built again * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildRHSAndSolve( typename 
TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY BuildRHS(pScheme, rModelPart, b); SystemSolve(A, Dx, b); KRATOS_CATCH("") } /** * @brief Function to perform the build of the RHS. * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& b) override { KRATOS_TRY //resetting to zero the vector of reactions if(BaseType::mCalculateReactionsFlag) { TSparseSpace::SetToZero(*(BaseType::mpReactionsVector)); } //Getting the Elements ElementsArrayType& pElements = rModelPart.Elements(); //getting the array of the conditions ConditionsArrayType& pConditions = rModelPart.Conditions(); ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //contributions to the system LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType EquationId; // assemble all elements #pragma omp parallel firstprivate( RHS_Contribution, EquationId) { const int nelements = static_cast<int>(pElements.size()); #pragma omp for schedule(guided, 512) nowait for (int i = 0; i<nelements; i++) { typename ElementsArrayType::iterator it = pElements.begin() + i; //detect if the element is active or not. 
If the user did not make any choice the element //is active by default bool element_is_active = true; if ((it)->IsDefined(ACTIVE)) element_is_active = (it)->Is(ACTIVE); if (element_is_active) { // Calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo); // Assemble the elemental contribution AssembleRHS(b, RHS_Contribution, EquationId); } } // assemble all conditions const int nconditions = static_cast<int>(pConditions.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i<nconditions; i++) { auto it = pConditions.begin() + i; //detect if the element is active or not. If the user did not make any choice the element //is active by default bool condition_is_active = true; if ((it)->IsDefined(ACTIVE)) condition_is_active = (it)->Is(ACTIVE); if (condition_is_active) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b, RHS_Contribution, EquationId); } } } KRATOS_CATCH("") } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. 
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) override { KRATOS_TRY; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler ElementsArrayType& pElements = rModelPart.Elements(); const int nelements = static_cast<int>(pElements.size()); Element::DofsVectorType ElementalDofList; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); unsigned int nthreads = OpenMPUtils::GetNumThreads(); // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type; // typedef std::unordered_set < NodeType::DofType::Pointer, // DofPointerHasher, // DofPointerComparor, // allocator_type > set_type; typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type; std::vector<set_type> dofs_aux_list(nthreads); // std::vector<allocator_type> allocators(nthreads); for (int i = 0; i < static_cast<int>(nthreads); i++) { // dofs_aux_list[i] = set_type( allocators[i]); dofs_aux_list[i].reserve(nelements); } #pragma omp parallel for firstprivate(nelements, ElementalDofList) for (int i = 0; i < static_cast<int>(nelements); i++) { typename ElementsArrayType::iterator it = pElements.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } ConditionsArrayType& pConditions = rModelPart.Conditions(); const int nconditions = static_cast<int>(pConditions.size()); #pragma omp parallel for 
firstprivate(nconditions, ElementalDofList) for (int i = 0; i < nconditions; i++) { typename ConditionsArrayType::iterator it = pConditions.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } //here we do a reduction in a tree so to have everything on thread 0 unsigned int old_max = nthreads; unsigned int new_max = ceil(0.5*static_cast<double>(old_max)); while (new_max >= 1 && new_max != old_max) { // //just for debugging // std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl; // for (int i = 0; i < new_max; i++) // { // if (i + new_max < old_max) // { // std::cout << i << " - " << i + new_max << std::endl; // } // } // std::cout << "********************" << std::endl; #pragma omp parallel for for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end()); dofs_aux_list[i + new_max].clear(); } } old_max = new_max; new_max = ceil(0.5*static_cast<double>(old_max)); } DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); Doftemp.reserve(dofs_aux_list[0].size()); for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++) { Doftemp.push_back(it->get()); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; // Throws an execption if there are no Degrees of freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" 
<< std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl; #ifdef USE_LOCKS_IN_ASSEMBLY if (mLockArray.size() != 0) { for (int i = 0; i < static_cast<int>(mLockArray.size()); i++) omp_destroy_lock(&mLockArray[i]); } mLockArray.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(mLockArray.size()); i++) omp_init_lock(&mLockArray[i]); #endif // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if(BaseType::GetCalculateReactionsFlag()) { for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl << "Node : "<<dof_iterator->Id()<< std::endl << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart& rModelPart ) override { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while the fixed one are at the end (in opposite order). 
// // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ModelPart& rModelPart ) override { KRATOS_TRY if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0)); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); 
ConstructMatrixStructure(pScheme, A, rModelPart); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { //KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ConstructMatrixStructure(pScheme, A, rModelPart); } } if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); //if needed resize the vector for the calculation of reactions if (BaseType::mCalculateReactionsFlag == true) { const std::size_t reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize; if (BaseType::mpReactionsVector->size() != reactions_vector_size) BaseType::mpReactionsVector->resize(reactions_vector_size, false); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void CalculateReactions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { //refresh RHS to have the correct reactions BuildRHS(pScheme, rModelPart, b); // Updating variables std::size_t i; TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; for (auto it2 = BaseType::mDofSet.ptr_begin(); it2 != BaseType::mDofSet.ptr_end(); ++it2) { i = (*it2)->EquationId(); if (i >= BaseType::mEquationSystemSize) { i -= BaseType::mEquationSystemSize; (*it2)->GetSolutionStepReactionValue() = -r_reactions_vector[i]; } } } /** * @brief Applies the dirichlet conditions. This operation may be very heavy or completely * unexpensive depending on the implementation choosen and on how the System Matrix is built. 
* @details For explanation of how it works for a particular implementation the user * should refer to the particular Builder And Solver choosen * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() override { this->mDofSet = DofsArrayType(); this->mpReactionsVector.reset(); // this->mReactionsVector = TSystemVectorType(); this->mpLinearSystemSolver->Clear(); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl; } /** * @brief This function is designed to be called once to perform all the checks needed * on the input provided. Checks can be "expensive" as the function is designed * to catch user's errors. * @param rModelPart The model part of the problem to solve * @return 0 all ok */ int Check(ModelPart& rModelPart) override { KRATOS_TRY return 0; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedEliminationBuilderAndSolver"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ #ifdef USE_LOCKS_IN_ASSEMBLY std::vector<omp_lock_t> mLockArray; #endif ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This function does the assembling of the LHS and RHS * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling */ void Assemble( TSystemMatrixType& A, TSystemVectorType& b, const LocalSystemMatrixType& LHS_Contribution, const LocalSystemVectorType& RHS_Contribution, const Element::EquationIdVectorType& EquationId #ifdef USE_LOCKS_IN_ASSEMBLY ,std::vector< omp_lock_t >& lock_array #endif ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { #ifdef USE_LOCKS_IN_ASSEMBLY omp_set_lock(&lock_array[i_global]); b[i_global] += RHS_Contribution(i_local); #else double& r_a = b[i_global]; const double& v_a = RHS_Contribution(i_local); #pragma omp atomic r_a += v_a; #endif AssembleRowContributionFreeDofs(A, LHS_Contribution, i_global, i_local, EquationId); #ifdef USE_LOCKS_IN_ASSEMBLY omp_unset_lock(&lock_array[i_global]); #endif } //note that computation of reactions is not performed here! 
} } //************************************************************************** virtual void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme, TSystemMatrixType& A, ModelPart& rModelPart) { //filling with zero the matrix (creating the structure) Timer::Start("MatrixStructure"); const std::size_t equation_size = BaseType::mEquationSystemSize; std::vector<std::unordered_set<std::size_t> > indices(equation_size); #pragma omp parallel for firstprivate(equation_size) for (int iii = 0; iii < static_cast<int>(equation_size); iii++) { indices[iii].reserve(40); } Element::EquationIdVectorType ids(3, 0); #pragma omp parallel firstprivate(ids) { // The process info ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // We repeat the same declaration for each thead std::vector<std::unordered_set<std::size_t> > temp_indexes(equation_size); #pragma omp for for (int index = 0; index < static_cast<int>(equation_size); ++index) temp_indexes[index].reserve(30); // Getting the size of the array of elements from the model const int number_of_elements = static_cast<int>(rModelPart.Elements().size()); // Element initial iterator const auto el_begin = rModelPart.ElementsBegin(); // We iterate over the elements #pragma omp for schedule(guided, 512) nowait for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) { auto it_elem = el_begin + i_elem; pScheme->EquationId( *(it_elem.base()), ids, r_current_process_info); for (auto& id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto& row_indices = temp_indexes[id_i]; for (auto& id_j : ids) if (id_j < BaseType::mEquationSystemSize) row_indices.insert(id_j); } } } // Getting the size of the array of the conditions const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size()); // Condition initial iterator const auto cond_begin = rModelPart.ConditionsBegin(); // We iterate over the conditions #pragma omp for schedule(guided, 512) nowait for (int i_cond = 0; i_cond<number_of_conditions; 
++i_cond) { auto it_cond = cond_begin + i_cond; pScheme->Condition_EquationId( *(it_cond.base()), ids, r_current_process_info); for (auto& id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto& row_indices = temp_indexes[id_i]; for (auto& id_j : ids) if (id_j < BaseType::mEquationSystemSize) row_indices.insert(id_j); } } } // Merging all the temporal indexes #pragma omp critical { for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) { indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end()); } } } //count the row sizes unsigned int nnz = 0; for (unsigned int i = 0; i < indices.size(); i++) nnz += indices[i].size(); A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); double* Avalues = A.value_data().begin(); std::size_t* Arow_indices = A.index1_data().begin(); std::size_t* Acol_indices = A.index2_data().begin(); //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(A.size1()); i++) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(A.size1()); i++) { const unsigned int row_begin = Arow_indices[i]; const unsigned int row_end = Arow_indices[i + 1]; unsigned int k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); it++) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } A.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } // virtual void ConstructMatrixStructure( // TSystemMatrixType& A, // ElementsContainerType& rElements, // ConditionsArrayType& rConditions, // ProcessInfo& CurrentProcessInfo) // { // // std::size_t equation_size = A.size1(); // std::vector<std::vector<std::size_t> > indices(equation_size); // // std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix)); // // Element::EquationIdVectorType ids(3, 
0); // for (typename ElementsContainerType::iterator i_element = rElements.begin(); i_element != rElements.end(); i_element++) // { // (i_element)->EquationIdVector(ids, CurrentProcessInfo); // // for (std::size_t i = 0; i < ids.size(); i++) // if (ids[i] < equation_size) // { // std::vector<std::size_t>& row_indices = indices[ids[i]]; // for (std::size_t j = 0; j < ids.size(); j++) // if (ids[j] < equation_size) // { // AddUnique(row_indices, ids[j]); // //indices[ids[i]].push_back(ids[j]); // } // } // // } // // for (typename ConditionsArrayType::iterator i_condition = rConditions.begin(); i_condition != rConditions.end(); i_condition++) // { // (i_condition)->EquationIdVector(ids, CurrentProcessInfo); // for (std::size_t i = 0; i < ids.size(); i++) // if (ids[i] < equation_size) // { // std::vector<std::size_t>& row_indices = indices[ids[i]]; // for (std::size_t j = 0; j < ids.size(); j++) // if (ids[j] < equation_size) // { // AddUnique(row_indices, ids[j]); // // indices[ids[i]].push_back(ids[j]); // } // } // } // // //allocating the memory needed // int data_size = 0; // for (std::size_t i = 0; i < indices.size(); i++) // { // data_size += indices[i].size(); // } // A.reserve(data_size, false); // // //filling with zero the matrix (creating the structure) // Timer::Start("MatrixStructure"); //#ifndef _OPENMP // for (std::size_t i = 0; i < indices.size(); i++) // { // std::vector<std::size_t>& row_indices = indices[i]; // std::sort(row_indices.begin(), row_indices.end()); // // for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++) // { // A.push_back(i, *it, 0.00); // } // row_indices.clear(); // } //#else // int number_of_threads = omp_get_max_threads(); // vector<unsigned int> matrix_partition; // CreatePartition(number_of_threads, indices.size(), matrix_partition); // if (this->GetEchoLevel() > 2) // { // KRATOS_WATCH(matrix_partition); // } // for (int k = 0; k < number_of_threads; k++) // { // #pragma omp 
parallel // if (omp_get_thread_num() == k) // { // for (std::size_t i = matrix_partition[k]; i < matrix_partition[k + 1]; i++) // { // std::vector<std::size_t>& row_indices = indices[i]; // std::sort(row_indices.begin(), row_indices.end()); // // for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++) // { // A.push_back(i, *it, 0.00); // } // row_indices.clear(); // } // } // } //#endif // Timer::Stop("MatrixStructure"); // } //************************************************************************** void AssembleLHS( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } /** * @brief This function is equivalent to the AssembleRowContribution of the block builder and solver * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are skipped */ inline void AssembleRowContributionFreeDofs(TSystemMatrixType& A, const Matrix& Alocal, const std::size_t i, const std::size_t i_local, const Element::EquationIdVectorType& EquationId) { double* values_vector = A.value_data().begin(); std::size_t* index1_vector = A.index1_data().begin(); std::size_t* index2_vector = A.index2_data().begin(); const std::size_t left_limit = index1_vector[i]; // Find the first entry // We iterate over the equation ids until we find the first equation id to be considered // We count in which component we find an ID std::size_t last_pos = 0; std::size_t last_found = 0; std::size_t counter = 0; for(std::size_t j=0; j < 
EquationId.size(); ++j) { ++counter; const std::size_t j_global = EquationId[j]; if (j_global < BaseType::mEquationSystemSize) { last_pos = ForwardFind(j_global,left_limit,index2_vector); last_found = j_global; break; } } // If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. If the number is below means that at we have several dofs free to be considered if (counter <= EquationId.size()) { #ifndef USE_LOCKS_IN_ASSEMBLY double& r_a = values_vector[last_pos]; const double& v_a = Alocal(i_local,counter - 1); #pragma omp atomic r_a += v_a; #else values_vector[last_pos] += Alocal(i_local,counter - 1); #endif // Now find all of the other entries std::size_t pos = 0; for(std::size_t j = counter; j < EquationId.size(); ++j) { std::size_t id_to_find = EquationId[j]; if (id_to_find < BaseType::mEquationSystemSize) { if(id_to_find > last_found) pos = ForwardFind(id_to_find,last_pos+1,index2_vector); else if(id_to_find < last_found) pos = BackwardFind(id_to_find,last_pos-1,index2_vector); else pos = last_pos; #ifndef USE_LOCKS_IN_ASSEMBLY double& r = values_vector[pos]; const double& v = Alocal(i_local,j); #pragma omp atomic r += v; #else values_vector[pos] += Alocal(i_local,j); #endif last_found = id_to_find; last_pos = pos; } } } } inline std::size_t ForwardFind(const std::size_t id_to_find, const std::size_t start, const std::size_t* index_vector) { std::size_t pos = start; while(id_to_find != index_vector[pos]) pos++; return pos; } inline std::size_t BackwardFind(const std::size_t id_to_find, const std::size_t start, const std::size_t* index_vector) { std::size_t pos = start; while(id_to_find != index_vector[pos]) pos--; return pos; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name 
Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } void AssembleRHS( TSystemVectorType& b, const LocalSystemVectorType& RHS_Contribution, const Element::EquationIdVectorType& EquationId ) { unsigned int local_size = RHS_Contribution.size(); if (BaseType::mCalculateReactionsFlag == false) { for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double& b_value = b[i_global]; const double& rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } else { TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double& b_value = b[i_global]; const double& rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } else //fixed dof { double& b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize]; const double& rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } } //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; 
j_local < local_size; j_local++) { int j_global = EquationId[j_local]; A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedEliminationBuilderAndSolver */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
residualbased_newton_raphson_strategy.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ \. // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY // System includes // External includes // Project includes #include "includes/define.h" #include "solving_strategies/strategies/solving_strategy.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/builtin_timer.h" //default builder and solver #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonStrategy * @ingroup KratosCore * @brief This is the base Newton Raphson strategy * @details This strategy iterates until the convergence is achieved (or the maximum number of iterations is surpassed) using a Newton Raphson algorithm * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; // Counted pointer of ClassName KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedNewtonRaphsonStrategy); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; 
typedef typename BaseType::TSchemeType TSchemeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor */ explicit ResidualBasedNewtonRaphsonStrategy() : BaseType() { } /** * @brief Default constructor. (with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart) : ResidualBasedNewtonRaphsonStrategy(rModelPart, ResidualBasedNewtonRaphsonStrategy::GetDefaultParameters()) { } /** * @brief Default constructor. 
(with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit ResidualBasedNewtonRaphsonStrategy(ModelPart& rModelPart, Parameters ThisParameters) : BaseType(rModelPart), mSolutionStepIsInitialized(false), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); if (p_builder_and_solver != nullptr) { // Tells to the builder and solver if the reactions have to be Calculated or not p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to // be reshaped at each step or not p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); } else { KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "BuilderAndSolver is not initialized. 
Please assign one before settings flags" << std::endl; } mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); } /** * Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, int MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false) : BaseType(rModelPart, MoveMeshFlag), mpScheme(pScheme), mpConvergenceCriteria(pNewConvergenceCriteria), mReformDofSetAtEachStep(ReformDofSetAtEachStep), mCalculateReactionsFlag(CalculateReactions), mSolutionStepIsInitialized(false), mMaxIterationNumber(MaxIterations), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY; // Setting up the default builder and solver mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer( new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver)); // Tells to the builder and solver if the reactions have to be Calculated or not mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to // be reshaped at each step or not 
mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH(""); } /** * @brief Constructor specifying the builder and solver * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, int MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false) : BaseType(rModelPart, MoveMeshFlag), mpScheme(pScheme), mpBuilderAndSolver(pNewBuilderAndSolver), mpConvergenceCriteria(pNewConvergenceCriteria), mReformDofSetAtEachStep(ReformDofSetAtEachStep), mCalculateReactionsFlag(CalculateReactions), mSolutionStepIsInitialized(false), mMaxIterationNumber(MaxIterations), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); // Tells to the builder and solver if the reactions have to be Calculated or not p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // 
Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH("") } /** * @brief Constructor specifying the builder and solver * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param MaxIterations The maximum number of non-linear iterations to be considered when solving the problem * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver") explicit ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, int MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag) { KRATOS_TRY KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, 
please use the constructor without linear solver" << std::endl; // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); // We check if the linear solver considered for the builder and solver is consistent auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver(); KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl; KRATOS_CATCH("") } /** * Constructor with Parameters * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param Settings Settings used in the strategy */ ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, Parameters Settings) : BaseType(rModelPart), mpScheme(pScheme), mpConvergenceCriteria(pNewConvergenceCriteria), mSolutionStepIsInitialized(false), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY; // Setting up the default builder and solver mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer( new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSolver)); // Tells to the builder and solver if the reactions have to be Calculated or not mpBuilderAndSolver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to // be reshaped at each step or not mpBuilderAndSolver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are 
rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); KRATOS_CATCH(""); } /** * @brief Constructor specifying the builder and solver and using Parameters * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param Settings Settings used in the strategy */ ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, Parameters Settings) : BaseType(rModelPart), mpScheme(pScheme), mpBuilderAndSolver(pNewBuilderAndSolver), mpConvergenceCriteria(pNewConvergenceCriteria), mSolutionStepIsInitialized(false), mInitializeWasPerformed(false), mKeepSystemConstantDuringIterations(false) { KRATOS_TRY // Validate and assign defaults Settings = this->ValidateAndAssignParameters(Settings, this->GetDefaultParameters()); this->AssignSettings(Settings); // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); // Tells to the builder and solver if the reactions have to be Calculated or not p_builder_and_solver->SetCalculateReactionsFlag(mCalculateReactionsFlag); // Tells to the Builder And Solver if the system matrix and vectors need to //be reshaped at each step or not p_builder_and_solver->SetReshapeMatrixFlag(mReformDofSetAtEachStep); // Set EchoLevel to the default value (only time is displayed) SetEchoLevel(1); // By default the matrices are rebuilt at each iteration this->SetRebuildLevel(2); mpA = TSparseSpace::CreateEmptyMatrixPointer(); mpDx = TSparseSpace::CreateEmptyVectorPointer(); mpb = TSparseSpace::CreateEmptyVectorPointer(); 
KRATOS_CATCH("") } /** * @brief Constructor specifying the builder and solver and using Parameters * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param pNewBuilderAndSolver The builder and solver employed * @param Parameters Settings used in the strategy */ KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver") ResidualBasedNewtonRaphsonStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, Parameters Settings) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, Settings) { KRATOS_TRY KRATOS_WARNING("ResidualBasedNewtonRaphsonStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl; // Getting builder and solver auto p_builder_and_solver = GetBuilderAndSolver(); // We check if the linear solver considered for the builder and solver is consistent auto p_linear_solver = p_builder_and_solver->GetLinearSystemSolver(); KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl; KRATOS_CATCH("") } /** * @brief Destructor. * @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear(). 
*/ ~ResidualBasedNewtonRaphsonStrategy() override { // If the linear solver has not been deallocated, clean it before // deallocating mpA. This prevents a memory error with the the ML // solver (which holds a reference to it). // NOTE: The linear solver is hold by the B&S auto p_builder_and_solver = this->GetBuilderAndSolver(); if (p_builder_and_solver != nullptr) { p_builder_and_solver->Clear(); } // Deallocating system vectors to avoid errors in MPI. Clear calls // TrilinosSpace::Clear for the vectors, which preserves the Map of // current vectors, performing MPI calls in the process. Due to the // way Python garbage collection works, this may happen after // MPI_Finalize has already been called and is an error. Resetting // the pointers here prevents Clear from operating with the // (now deallocated) vectors. mpA.reset(); mpDx.reset(); mpb.reset(); Clear(); } /** * @brief Set method for the time scheme * @param pScheme The pointer to the time scheme considered */ void SetScheme(typename TSchemeType::Pointer pScheme) { mpScheme = pScheme; }; /** * @brief Get method for the time scheme * @return mpScheme: The pointer to the time scheme considered */ typename TSchemeType::Pointer GetScheme() { return mpScheme; }; /** * @brief Set method for the builder and solver * @param pNewBuilderAndSolver The pointer to the builder and solver considered */ void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver) { mpBuilderAndSolver = pNewBuilderAndSolver; }; /** * @brief Get method for the builder and solver * @return mpBuilderAndSolver: The pointer to the builder and solver considered */ typename TBuilderAndSolverType::Pointer GetBuilderAndSolver() { return mpBuilderAndSolver; }; /** * @brief This method sets the flag mInitializeWasPerformed * @param InitializePerformedFlag The flag that tells if the initialize has been computed */ void SetInitializePerformedFlag(bool InitializePerformedFlag = true) { mInitializeWasPerformed = 
InitializePerformedFlag; } /** * @brief This method gets the flag mInitializeWasPerformed * @return mInitializeWasPerformed: The flag that tells if the initialize has been computed */ bool GetInitializePerformedFlag() { return mInitializeWasPerformed; } /** * @brief This method sets the flag mCalculateReactionsFlag * @param CalculateReactionsFlag The flag that tells if the reactions are computed */ void SetCalculateReactionsFlag(bool CalculateReactionsFlag) { mCalculateReactionsFlag = CalculateReactionsFlag; } /** * @brief This method returns the flag mCalculateReactionsFlag * @return The flag that tells if the reactions are computed */ bool GetCalculateReactionsFlag() { return mCalculateReactionsFlag; } /** * @brief This method sets the flag mFullUpdateFlag * @param UseOldStiffnessInFirstIterationFlag The flag that tells if */ void SetUseOldStiffnessInFirstIterationFlag(bool UseOldStiffnessInFirstIterationFlag) { mUseOldStiffnessInFirstIteration = UseOldStiffnessInFirstIterationFlag; } /** * @brief This method returns the flag mFullUpdateFlag * @return The flag that tells if */ bool GetUseOldStiffnessInFirstIterationFlag() { return mUseOldStiffnessInFirstIteration; } /** * @brief This method sets the flag mReformDofSetAtEachStep * @param Flag The flag that tells if each time step the system is rebuilt */ void SetReformDofSetAtEachStepFlag(bool Flag) { mReformDofSetAtEachStep = Flag; GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep); } /** * @brief This method returns the flag mReformDofSetAtEachStep * @return The flag that tells if each time step the system is rebuilt */ bool GetReformDofSetAtEachStepFlag() { return mReformDofSetAtEachStep; } /** * @brief This method sets the flag mMaxIterationNumber * @param MaxIterationNumber This is the maximum number of on linear iterations */ void SetMaxIterationNumber(unsigned int MaxIterationNumber) { mMaxIterationNumber = MaxIterationNumber; } /** * @brief This method gets the flag mMaxIterationNumber * 
@return mMaxIterationNumber: This is the maximum number of on linear iterations */ unsigned int GetMaxIterationNumber() { return mMaxIterationNumber; } /** * @brief It sets the level of echo for the solving strategy * @param Level The level to set * @details The different levels of echo are: * - 0: Mute... no echo at all * - 1: Printing time and basic informations * - 2: Printing linear solver data * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b... */ void SetEchoLevel(int Level) override { BaseType::mEchoLevel = Level; GetBuilderAndSolver()->SetEchoLevel(Level); } //********************************************************************************* /**OPERATIONS ACCESSIBLE FROM THE INPUT: **/ /** * @brief Create method * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ typename BaseType::Pointer Create( ModelPart& rModelPart, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(rModelPart, ThisParameters); } /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY const DataCommunicator &r_comm = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator(); //OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions //if the operations needed were already performed this does nothing if (mInitializeWasPerformed == false) Initialize(); //initialize solution step if (mSolutionStepIsInitialized == false) InitializeSolutionStep(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet(); GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); // Applying constraints if needed auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints(); const int local_number_of_constraints = r_constraints_array.size(); const int global_number_of_constraints = r_comm.SumAll(local_number_of_constraints); if(global_number_of_constraints != 0) { const auto& r_process_info = BaseType::GetModelPart().GetProcessInfo(); const auto it_const_begin = r_constraints_array.begin(); #pragma omp parallel for for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i) (it_const_begin + i)->ResetSlaveDofs(r_process_info); #pragma omp parallel for for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i) (it_const_begin + i)->Apply(r_process_info); // The following is needed since we need to eventually compute time derivatives after applying // Master slave relations TSparseSpace::SetToZero(rDx); this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb); } // Move the mesh if needed if (this->MoveMeshFlag() == true) BaseType::MoveMesh(); KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; if (mInitializeWasPerformed == false) { 
//pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TConvergenceCriteriaType::Pointer p_convergence_criteria = mpConvergenceCriteria; //Initialize The Scheme - OPERATIONS TO BE DONE ONCE if (p_scheme->SchemeIsInitialized() == false) p_scheme->Initialize(BaseType::GetModelPart()); //Initialize The Elements - OPERATIONS TO BE DONE ONCE if (p_scheme->ElementsAreInitialized() == false) p_scheme->InitializeElements(BaseType::GetModelPart()); //Initialize The Conditions - OPERATIONS TO BE DONE ONCE if (p_scheme->ConditionsAreInitialized() == false) p_scheme->InitializeConditions(BaseType::GetModelPart()); //initialisation of the convergence criteria if (p_convergence_criteria->IsInitialized() == false) p_convergence_criteria->Initialize(BaseType::GetModelPart()); mInitializeWasPerformed = true; } KRATOS_CATCH(""); } /** * @brief Clears the internal storage */ void Clear() override { KRATOS_TRY; // Setting to zero the internal flag to ensure that the dof sets are recalculated. 
Also clear the linear solver stored in the B&S auto p_builder_and_solver = GetBuilderAndSolver(); if (p_builder_and_solver != nullptr) { p_builder_and_solver->SetDofSetIsInitializedFlag(false); p_builder_and_solver->Clear(); } // Clearing the system of equations if (mpA != nullptr) SparseSpaceType::Clear(mpA); if (mpDx != nullptr) SparseSpaceType::Clear(mpDx); if (mpb != nullptr) SparseSpaceType::Clear(mpb); // Clearing scheme auto p_scheme = GetScheme(); if (p_scheme != nullptr) { GetScheme()->Clear(); } mInitializeWasPerformed = false; mSolutionStepIsInitialized = false; KRATOS_CATCH(""); } /** * @brief This should be considered as a "post solution" convergence check which is useful for coupled analysis - the convergence criteria used is the one used inside the "solve" step */ bool IsConverged() override { KRATOS_TRY; TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; if (mpConvergenceCriteria->GetActualizeRHSflag() == true) { TSparseSpace::SetToZero(rb); GetBuilderAndSolver()->BuildRHS(GetScheme(), BaseType::GetModelPart(), rb); } return mpConvergenceCriteria->PostCriteria(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); KRATOS_CATCH(""); } /** * @brief This operations should be called before printing the results when non trivial results * (e.g. stresses) * Need to be calculated given the solution of the step * @details This operations should be called only when needed, before printing as it can involve a non * negligible cost */ void CalculateOutputData() override { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; GetScheme()->CalculateOutputData(BaseType::GetModelPart(), GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); } /** * @brief Performs all the required operations that should be done (for each step) before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. 
*/ void InitializeSolutionStep() override { KRATOS_TRY; if (!mSolutionStepIsInitialized) { // Pointers needed in the solution typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); ModelPart& r_model_part = BaseType::GetModelPart(); //set up the system, operation performed just once unless it is required //to reform the dof set at each iteration BuiltinTimer system_construction_time; if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false || mReformDofSetAtEachStep == true) { //setting up the list of the DOFs to be solved BuiltinTimer setup_dofs_time; p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part); KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "Setup Dofs Time: " << setup_dofs_time.ElapsedSeconds() << std::endl; //shaping correctly the system BuiltinTimer setup_system_time; p_builder_and_solver->SetUpSystem(r_model_part); KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "Setup System Time: " << setup_system_time.ElapsedSeconds() << std::endl; //setting up the Vectors involved to the correct size BuiltinTimer system_matrix_resize_time; p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb, r_model_part); KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "System Matrix Resize Time: " << system_matrix_resize_time.ElapsedSeconds() << std::endl; } KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", BaseType::GetEchoLevel() > 0) << "System Construction Time: " << system_construction_time.ElapsedSeconds() << std::endl; TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; // Initial operations ... things that are constant over the Solution Step p_builder_and_solver->InitializeSolutionStep(r_model_part, rA, rDx, rb); // Initial operations ... 
things that are constant over the Solution Step p_scheme->InitializeSolutionStep(r_model_part, rA, rDx, rb); // Initialisation of the convergence criteria if (mpConvergenceCriteria->GetActualizeRHSflag() == true) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } mpConvergenceCriteria->InitializeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); if (mpConvergenceCriteria->GetActualizeRHSflag() == true) TSparseSpace::SetToZero(rb); mSolutionStepIsInitialized = true; } KRATOS_CATCH(""); } /** * @brief Performs all the required operations that should be done (for each step) after solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. */ void FinalizeSolutionStep() override { KRATOS_TRY; ModelPart& r_model_part = BaseType::GetModelPart(); typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; //Finalisation of the solution step, //operations to be done after achieving convergence, for example the //Final Residual Vector (mb) has to be saved in there //to avoid error accumulation p_scheme->FinalizeSolutionStep(r_model_part, rA, rDx, rb); p_builder_and_solver->FinalizeSolutionStep(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeSolutionStep(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); //Cleaning memory after the solution p_scheme->Clean(); //reset flags for next step mSolutionStepIsInitialized = false; if (mReformDofSetAtEachStep == true) //deallocate the systemvectors { this->Clear(); } KRATOS_CATCH(""); } /** * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. 
*/ bool SolveSolutionStep() override { // Pointers needed in the solution ModelPart& r_model_part = BaseType::GetModelPart(); typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; //initializing the parameters of the Newton-Raphson cycle unsigned int iteration_number = 1; r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; bool residual_is_updated = false; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); bool is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // Function to perform the building and the solving phase. if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); if (mUseOldStiffnessInFirstIteration){ p_builder_and_solver->BuildAndSolveLinearizedOnPreviousIteration(p_scheme, r_model_part, rA, rDx, rb,BaseType::MoveMeshFlag()); } else { p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); // Dx = 0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); if (is_converged) { if (mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = 
mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } //Iteration Cycle... performed only for NonLinearProblems while (is_converged == false && iteration_number++ < mMaxIterationNumber) { //setting the number of iteration r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); //call the linear system solver to find the correction mDx for the //it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (BaseType::mRebuildLevel > 1 || BaseType::mStiffnessMatrixIsBuilt == false) { if (GetKeepSystemConstantDuringIterations() == false) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! 
" << std::endl; } // Debugging info EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, BaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; if (is_converged == true) { if (mpConvergenceCriteria->GetActualizeRHSflag() == true) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; } is_converged = mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } } //plots a warning if the maximum number of iterations is exceeded if (iteration_number >= mMaxIterationNumber) { MaxIterationsExceeded(); } else { KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << mMaxIterationNumber << " iterations" << std::endl; } //recalculate residual if needed //(note that some convergence criteria need it to be recalculated) if (residual_is_updated == false) { // NOTE: // The following part will be commented because it is time consuming // and there is no obvious reason to be here. If someone need this // part please notify the community via mailing list before uncommenting it. // Pooyan. // TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } //calculate reactions if required if (mCalculateReactionsFlag == true) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; } /** * @brief Function to perform expensive checks. * @details It is designed to be called ONCE to verify that the input is correct. 
*/ int Check() override { KRATOS_TRY BaseType::Check(); GetBuilderAndSolver()->Check(BaseType::GetModelPart()); GetScheme()->Check(BaseType::GetModelPart()); mpConvergenceCriteria->Check(BaseType::GetModelPart()); return 0; KRATOS_CATCH("") } /** * @brief This method provides the defaults parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "newton_raphson_strategy", "use_old_stiffness_in_first_iteration": false, "max_iteration" : 10, "reform_dofs_at_each_step" : false, "compute_reactions" : false, "builder_and_solver_settings" : {}, "convergence_criteria_settings" : {}, "linear_solver_settings" : {}, "scheme_settings" : {} })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "newton_raphson_strategy"; } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ /** * @brief This method returns the LHS matrix * @return The LHS matrix */ TSystemMatrixType &GetSystemMatrix() override { TSystemMatrixType &mA = *mpA; return mA; } /** * @brief This method returns the RHS vector * @return The RHS vector */ TSystemVectorType& GetSystemVector() override { TSystemVectorType& mb = *mpb; return mb; } /** * @brief This method returns the solution vector * @return The Dx vector */ TSystemVectorType& GetSolutionVector() override { TSystemVectorType& mDx = *mpDx; return mDx; } /** * @brief Set method for the flag mKeepSystemConstantDuringIterations * @param Value If we consider constant the system of equations during the iterations */ void 
SetKeepSystemConstantDuringIterations(bool Value) { mKeepSystemConstantDuringIterations = Value; } /** * @brief Get method for the flag mKeepSystemConstantDuringIterations * @return True if we consider constant the system of equations during the iterations, false otherwise */ bool GetKeepSystemConstantDuringIterations() { return mKeepSystemConstantDuringIterations; } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualBasedNewtonRaphsonStrategy"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} private: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} protected: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ typename TSchemeType::Pointer mpScheme = nullptr; /// The pointer to the time scheme employed typename TBuilderAndSolverType::Pointer mpBuilderAndSolver = nullptr; /// The pointer to the builder and solver employed typename TConvergenceCriteriaType::Pointer mpConvergenceCriteria = nullptr; /// The pointer to the convergence criteria employed TSystemVectorPointerType mpDx; /// The increment in the solution TSystemVectorPointerType mpb; /// The RHS vector of the system of equations TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations /** * @brief Flag telling if it is needed to reform the DofSet at each solution step or if it is possible to form it just once * @details Default = false - true : Reforme at each time step - false : Form just one (more 
efficient) */ bool mReformDofSetAtEachStep; /** * @brief Flag telling if it is needed or not to compute the reactions * @details default = true */ bool mCalculateReactionsFlag; /** * @brief Flag telling if a full update of the database will be performed at the first iteration * @details default = false */ bool mUseOldStiffnessInFirstIteration = false; bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step unsigned int mMaxIterationNumber; /// The maximum number of iterations, 30 by default bool mInitializeWasPerformed; /// Flag to set as initialized the strategy bool mKeepSystemConstantDuringIterations; // Flag to allow keeping system matrix constant during iterations ///@} ///@name Private Operators ///@{ /** * @brief Here the database is updated * @param A The LHS matrix of the system of equations * @param Dx The incremement in the solution * @param b The RHS vector of the system of equations * @param MoveMesh The flag that allows to move the mesh */ virtual void UpdateDatabase( TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb, const bool MoveMesh) { typename TSchemeType::Pointer p_scheme = GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver(); p_scheme->Update(BaseType::GetModelPart(), p_builder_and_solver->GetDofSet(), rA, rDx, rb); // Move the mesh if needed if (MoveMesh == true) BaseType::MoveMesh(); } /** * @brief This method returns the components of the system of equations depending of the echo level * @param IterationNumber The non linear iteration in the solution loop */ virtual void EchoInfo(const unsigned int IterationNumber) { TSystemMatrixType& rA = *mpA; TSystemVectorType& rDx = *mpDx; TSystemVectorType& rb = *mpb; if (this->GetEchoLevel() == 2) //if it is needed to print the debug info { KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl; KRATOS_INFO("RHS") << "RHS = " << rb << std::endl; } else if (this->GetEchoLevel() == 3) //if it is needed to 
print the debug info { KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl; KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl; KRATOS_INFO("RHS") << "RHS = " << rb << std::endl; } else if (this->GetEchoLevel() == 4) //print to matrix market file { std::stringstream matrix_market_name; matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm"; TSparseSpace::WriteMatrixMarketMatrix((char *)(matrix_market_name.str()).c_str(), rA, false); std::stringstream matrix_market_vectname; matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << "_" << IterationNumber << ".mm.rhs"; TSparseSpace::WriteMatrixMarketVector((char *)(matrix_market_vectname.str()).c_str(), rb); } } /** * @brief This method prints information after reach the max number of iterations */ virtual void MaxIterationsExceeded() { KRATOS_INFO_IF("ResidualBasedNewtonRaphsonStrategy", this->GetEchoLevel() > 0) << "ATTENTION: max iterations ( " << mMaxIterationNumber << " ) exceeded!" 
            << std::endl;
    }

    /**
     * @brief This method assigns settings to member variables
     * @param ThisParameters Parameters that are assigned to the member variables
     */
    void AssignSettings(const Parameters ThisParameters) override
    {
        // Let the base strategy consume its own settings first.
        BaseType::AssignSettings(ThisParameters);

        mMaxIterationNumber = ThisParameters["max_iteration"].GetInt();
        mReformDofSetAtEachStep = ThisParameters["reform_dofs_at_each_step"].GetBool();
        mCalculateReactionsFlag = ThisParameters["compute_reactions"].GetBool();
        mUseOldStiffnessInFirstIteration = ThisParameters["use_old_stiffness_in_first_iteration"].GetBool();

        // Saving the convergence criteria to be used
        // NOTE: construction from a "name" entry is not implemented yet; a
        // named sub-setting is rejected explicitly instead of being ignored.
        if (ThisParameters["convergence_criteria_settings"].Has("name")) {
            KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
        }

        // Saving the scheme
        if (ThisParameters["scheme_settings"].Has("name")) {
            KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
        }

        // Setting up the default builder and solver
        if (ThisParameters["builder_and_solver_settings"].Has("name")) {
            KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
        }
    }

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /**
     * Copy constructor.
     */
    ResidualBasedNewtonRaphsonStrategy(const ResidualBasedNewtonRaphsonStrategy &Other){};

    ///@}

}; /* Class ResidualBasedNewtonRaphsonStrategy */

///@}

///@name Type Definitions
///@{

///@}

} /* namespace Kratos. */

#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_STRATEGY defined */
parallel_for.h
/* Copyright (c) 2016, Taiga Nomi All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. */ #pragma once #include <cassert> #include <cstdio> #include <limits> #include <string> #include <type_traits> #include <vector> #include "aligned_allocator.h" #include "nn_error.h" #include "tiny_dnn/config.h" #ifdef CNN_USE_TBB #ifndef NOMINMAX #define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h #endif #include <tbb/task_group.h> #include <tbb/tbb.h> #endif #if !defined(CNN_USE_OMP) && !defined(CNN_SINGLE_THREAD) #include <future> #include <thread> #endif #if defined(CNN_USE_GCD) && !defined(CNN_SINGLE_THREAD) #include <dispatch/dispatch.h> #endif namespace tiny_dnn { #ifdef CNN_USE_TBB static tbb::task_scheduler_init tbbScheduler( tbb::task_scheduler_init::automatic); // tbb::task_scheduler_init::deferred); typedef tbb::blocked_range<int> blocked_range; template <typename Func> void parallel_for(int begin, int end, const Func &f, int grainsize) { tbb::parallel_for( blocked_range(begin, end, end - begin > grainsize ? 
grainsize : 1), f); } template <typename Func> void xparallel_for(int begin, int end, const Func &f) { f(blocked_range(begin, end, 100)); } #else struct blocked_range { typedef int const_iterator; blocked_range(int begin, int end) : begin_(begin), end_(end) {} blocked_range(size_t begin, size_t end) : begin_(static_cast<int>(begin)), end_(static_cast<int>(end)) {} const_iterator begin() const { return begin_; } const_iterator end() const { return end_; } private: int begin_; int end_; }; template <typename Func> void xparallel_for(size_t begin, size_t end, const Func &f) { blocked_range r(begin, end); f(r); } #if defined(CNN_USE_OMP) template <typename Func> void parallel_for(int begin, int end, const Func &f, int /*grainsize*/) { #pragma omp parallel for for (int i = begin; i < end; ++i) f(blocked_range(i, i + 1)); } #elif defined(CNN_USE_GCD) template <typename Func> void parallel_for(int begin, int end, const Func &f, int grainsize) { int count = end - begin; int blockSize = grainsize; if (count < blockSize || blockSize == 0) { blockSize = 1; } int blockCount = (count + blockSize - 1) / blockSize; assert(blockCount > 0); dispatch_apply(blockCount, dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^(size_t block) { int blockStart = static_cast<int>(block * blockSize); int blockEnd = blockStart + blockSize; if (blockEnd > end) { blockEnd = end; } assert(blockStart < blockEnd); f(blocked_range(blockStart, blockEnd)); }); } #elif defined(CNN_SINGLE_THREAD) template <typename Func> void parallel_for(int begin, int end, const Func &f, int /*grainsize*/) { xparallel_for(static_cast<size_t>(begin), static_cast<size_t>(end), f); } #else template <typename Func> void parallel_for(int start, int end, const Func &f, int /*grainsize*/) { int nthreads = std::thread::hardware_concurrency(); int blockSize = (end - start) / nthreads; if (blockSize * nthreads < end - start) blockSize++; std::vector<std::future<void> > futures; int blockStart = start; int blockEnd = blockStart + 
blockSize; if (blockEnd > end) blockEnd = end; for (int i = 0; i < nthreads; i++) { futures.push_back( std::move(std::async(std::launch::async, [blockStart, blockEnd, &f] { f(blocked_range(blockStart, blockEnd)); }))); blockStart += blockSize; blockEnd = blockStart + blockSize; if (blockStart >= end) break; if (blockEnd > end) blockEnd = end; } for (auto &future : futures) future.wait(); } #endif #endif // CNN_USE_TBB template <typename T, typename U> bool value_representation(U const &value) { return static_cast<U>(static_cast<T>(value)) == value; } template <typename T, typename Func> inline void for_(std::true_type, bool parallelize, int begin, T end, Func f, int grainsize = 100) { parallelize = parallelize && value_representation<int>(end); parallelize ? parallel_for(begin, static_cast<int>(end), f, grainsize) : xparallel_for(begin, static_cast<int>(end), f); } template <typename T, typename Func> inline void for_(std::false_type, bool parallelize, int begin, T end, Func f, int grainsize = 100) { parallelize ? parallel_for(begin, static_cast<int>(end), f, grainsize) : xparallel_for(begin, end, f); } template <typename T, typename Func> inline void for_( bool parallelize, int begin, T end, Func f, int grainsize = 100) { static_assert(std::is_integral<T>::value, "end must be integral type"); for_(typename std::is_unsigned<T>::type(), parallelize, begin, end, f, grainsize); } template <typename T, typename Func> void for_i(bool parallelize, T size, Func f, int grainsize = 100) { #ifdef CNN_SINGLE_THREAD parallelize = false; #endif for_(parallelize, 0, size, [&](const blocked_range &r) { #ifdef CNN_USE_OMP #pragma omp parallel for #endif for (int i = r.begin(); i < r.end(); i++) f(i); }, grainsize); } template <typename T, typename Func> void for_i(T size, Func f, int grainsize = 100) { for_i(true, size, f, grainsize); } } // namespace tiny_dnn
discretizeMT.c
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <math.h> #include <float.h> #include <string.h> #include <mpi.h> #include "compearth.h" #include "parmt_mtsearch.h" #ifdef PARMT_USE_INTEL #include <mkl_lapacke.h> #include <mkl_cblas.h> #else #include <lapacke.h> #include <cblas.h> #endif #include "iscl/array/array.h" #include "iscl/memory/memory.h" /* int main_temp() { double *betas, *gammas, *kappas, *M0s, *mts, *sigmas, *thetas; int i, ierr, nmt; const int ng = 29; const int nb = 29; const int nk = 6; const int ns = 6; const int nt = 6; const int nm = 1; const int ldm = 8; const double oneDeg = M_PI/180.0; const double betaMin = 0.0 + oneDeg; const double betaMax = M_PI - oneDeg; const double gammaMin =-M_PI/6.0 + oneDeg; const double gammaMax = M_PI/6.0 - oneDeg; const double kappaMin = 0.0 + oneDeg; const double kappaMax = 2.0*M_PI - oneDeg; const double thetaMin = 0.0 + oneDeg; const double thetaMax = 0.5*M_PI - oneDeg; const double sigmaMin =-0.5*M_PI + oneDeg; const double sigmaMax = 0.5*M_PI - oneDeg; nmt = nm*nb*ng*nk*ns*nt; mts = memory_calloc64f(nmt*ldm); //(double *)calloc((size_t) (nmt*ldm), sizeof(double)); M0s = memory_calloc64f(nm); //(double *)calloc((size_t) nm, sizeof(double)); betas = memory_calloc64f(nb); //(double *)calloc((size_t) nb, sizeof(double)); gammas = memory_calloc64f(ng); //(double *)calloc((size_t) ng, sizeof(double)); kappas = memory_calloc64f(nk); //(double *)calloc((size_t) nk, sizeof(double)); sigmas = memory_calloc64f(ns); //(double *)calloc((size_t) ns, sizeof(double)); thetas = memory_calloc64f(nt); //(double *)calloc((size_t) nt, sizeof(double)); ierr = array_linspace64f_work(1.0/sqrt(2.0), 1.0/sqrt(2.0), nm, M0s); ierr = array_linspace64f_work(betaMin+0.001, betaMax-0.001, nb, betas); ierr = array_linspace64f_work(gammaMin, gammaMax, ng, gammas); ierr = array_linspace64f_work(kappaMin, kappaMax, nk, kappas); ierr = array_linspace64f_work(thetaMin, thetaMax, nt, thetas); ierr = 
array_linspace64f_work(sigmaMin, sigmaMax, ns, sigmas); // avoid warnings on theta in tt2cmt ierr = gridSearch_discretizeMT64f(ng, gammas, nb, betas, nm, M0s, nk, kappas, nt, thetas, ns, sigmas, ldm, nmt, mts); double Mdc[6], lam[3], U[6], theta; compearth_tt2cmt(0.0, 0.0, 1./sqrt(2.), 0.0, 0.1, 0.0, Mdc, lam, U); FILE *ofl = fopen("mts.txt", "w"); for (i=0; i<nmt; i++) { compearth_angleMT(1, Mdc, &mts[ldm*i], &theta); fprintf(ofl, "%f %f %f %f %f %f %f\n", //i+1, mts[ldm*i+0], mts[ldm*i+1], mts[ldm*i+2], mts[ldm*i+3], mts[ldm*i+4], mts[ldm*i+5], theta); } fclose(ofl); free(mts); free(M0s); free(betas); free(gammas); free(kappas); free(sigmas); free(thetas); return 0; } */ //============================================================================// /*! * @brief Provides a cell based discretization of the moment tensor space. * * @param[in] nb Number of colatitudes. * @param[in] betaLower Lower colatitude in grid search [0, betaUpper). * @param[in] betaUpper Upper colatitude in grid search (betaLower, pi]. * @param[in] ng Number of longitudes. * @param[in] gammaLower Lower longitude in grid search [-pi/6, gammaUpper). * @param[in] gammaUpper Upper longitude in grid search (gammaLower, pi/6]. * @param[in] nk Number of strikes in grid search. * @param[in] kappaLower Lower strike in grid search [0, kappaUpper). * @param[in] kappaUpper Upper strike in grid search (kappaLower, 2*pi]. * @param[in] ns Number of slips in grid search. * @param[in] sigmaLower Lower slip angle in grid search [-pi, sigmaUpper). * @param[in] sigmaUpper Upper slip angle in grid search (sigmaUpper, pi]. * @param[in] nt Number of dips in grid search. * @param[in] thetaLower Lower dip in grid search [0,dipUpper). * @param[in] thetaUpper Upper dip in grid search (dipLower, pi/2]. * @param[in] nm Number of magnitudes. * @param[in] m0Lower Lower scalar moment in grid search (Newton-meters). * @param[in] m0Upper Upper scalar moment in grid search (Newton-meters). 
 * @param[in] luseLog     If true then the scalar moments will be
 *                        discretized on a log scale.
 *
 * @param[out] betas      Colatitudes (radians) at cell centers [nb].
 * @param[out] gammas     Longitudes (radians) at cell centers [ng].
 * @param[out] kappas     Strike angles (radians) at cell centers [nk].
 * @param[out] sigmas     Slip angles (radians) at cell centers [ns].
 * @param[out] thetas     Dip angles (radians) at cell centers [nt].
 * @param[out] M0s        Scalar moments (Newton-meters) in grid search [nm].
 *
 * @author Ben Baker
 *
 * @copyright ISTI distributed under Apache 2
 *
 */
int parmt_discretizeCells64f(
    const int nb, const double betaLower, const double betaUpper,
    const int ng, const double gammaLower, const double gammaUpper,
    const int nk, const double kappaLower, const double kappaUpper,
    const int ns, const double sigmaLower, const double sigmaUpper,
    const int nt, const double thetaLower, const double thetaUpper,
    const int nm, const double m0Lower, const double m0Upper,
    const bool luseLog,
    double **betas, double **gammas, double **kappas,
    double **sigmas, double **thetas, double **M0s)
{
    const char *fcnm = "parmt_discretizeCells64f\0";
    double *mwl, *u, *v, *h;
    double du, du2, dv, dv2, dh, dh2, dk, dk2, ds, ds2, hLower, hUpper,
           mwll, mwul, uLower, uUpper, vLower, vUpper;
    int ierr;
    const double two = 2.0;
    // Initialize and do some basic error checks
    u = NULL;
    v = NULL;
    h = NULL;
    if (nb < 1 || ng < 1 || nk < 1 || ns < 1 || nt < 1 || nm < 1)
    {
        if (nb < 1){printf("%s: no betas\n", fcnm);}
        if (ng < 1){printf("%s: no gammas\n", fcnm);}
        if (nk < 1){printf("%s: no kappas\n", fcnm);}
        if (ns < 1){printf("%s: no sigmas\n", fcnm);}
        if (nt < 1){printf("%s: no thetas\n", fcnm);}
        if (nm < 1){printf("%s: no magnitudes\n", fcnm);}
        return -1;
    }
    // Discretize the moment tensor space in (u, v, h) space where uniform
    // spacing corresponds to uniform cell "area" on the lune.
    compearth_beta2u(1, &betaLower, &uLower);
    compearth_beta2u(1, &betaUpper, &uUpper);
    compearth_gamma2v(1, &gammaLower, &vLower);
    compearth_gamma2v(1, &gammaUpper, &vUpper);
    compearth_theta2h(1, &thetaLower, &hLower);
    compearth_theta2h(1, &thetaUpper, &hUpper);
    // Cell widths; cell centers sit half a width inside the end-points.
    du = (uUpper - uLower)/(double) nb;
    dv = (vUpper - vLower)/(double) ng;
    dh =-(hUpper - hLower)/(double) nt; // negative sign makes +theta increasing
    dk = (kappaUpper - kappaLower)/(double) nk;
    ds = (sigmaUpper - sigmaLower)/(double) ns;
    du2 = du/two;
    dv2 = dv/two;
    dh2 = dh/two;
    u = array_linspace64f(uLower+du2, uUpper-du2, nb, &ierr);
    v = array_linspace64f(vLower+dv2, vUpper-dv2, ng, &ierr);
    // dh is negative, so hLower-dh2 .. hUpper+dh2 still shrinks inward.
    h = array_linspace64f(hLower-dh2, hUpper+dh2, nt, &ierr);
    *betas = memory_calloc64f(nb);
    *gammas = memory_calloc64f(ng);
    *thetas = memory_calloc64f(nt);
    // Map the uniformly discretized (u, v, h) cell centers back to lune
    // coordinates (beta, gamma, theta).
    //compearth_u2beta(nb, 20, 2, u, 1.e-6, *betas);
    compearth_u2beta(nb, u, *betas);
    compearth_v2gamma(ng, v, *gammas);
    compearth_h2theta(nt, h, *thetas);
    // discretize the magnitudes, kappa, and sigma
    dk2 = dk/two;
    ds2 = ds/two;
    if (!luseLog || nm == 1)
    {
        *M0s = array_linspace64f(m0Lower, m0Upper, nm, &ierr);
    }
    else
    {
        // TODO: I doubt this is correct because I don't understand how
        // lune volumes map to `spherical volumes'.  For simplicity I'll
        // use a shell but I think I should be looking at the `active
        // area' of the beta and gamma grid search.  In the interim I'll
        // use the volume of a spherical shell which is given by
        // \frac{4 \pi R^3}{3} where R is the radius (magnitude).
        // To get some form of uniform volume spacing I'll take the log
        // at the upper and lower limit and discretize evenly in logspace
        // then come back to M0.
/*
        m0ll = 3.0*log(4.0*M_PI*m0Lower/3.0);
        m0ul = 3.0*log(4.0*M_PI*m0Upper/3.0);
        m0l = array_linspace64f(m0ll, m0ul, nm, &ierr);
        // Now solve for m0: 3/(4*pi)*exp(m0l/3)
        cblas_dscal(nm, 1.0/3.0, m0l, 1); // m0l = m0l/3
        *M0s = array_exp64f(nm, m0l, &ierr); // *M0 = exp(m0l/3)
        cblas_dscal(nm, 3.0/(4.0*M_PI), *M0s, 1); // *M0 = 3/(4*pi)exp(m0l/3)
        memory_free64f(&m0l);
*/
        // TODO: An alternative to doing math is just discretizing on
        // the magnitude scale
        compearth_m02mw(1, CE_KANAMORI_1978, &m0Lower, &mwll);
        compearth_m02mw(1, CE_KANAMORI_1978, &m0Upper, &mwul);
        mwl = array_linspace64f(mwll, mwul, nm, &ierr);
        *M0s = memory_calloc64f(nm);
        ierr = compearth_mw2m0(nm, CE_KANAMORI_1978, mwl, *M0s);
        memory_free64f(&mwl);
    }
    // kappa and sigma are discretized directly (no change of variables).
    *kappas = array_linspace64f(kappaLower+dk2, kappaUpper-dk2, nk, &ierr);
    *sigmas = array_linspace64f(sigmaLower+ds2, sigmaUpper-ds2, ns, &ierr);
    memory_free64f(&u);
    memory_free64f(&v);
    memory_free64f(&h);
    return ierr;
}
//============================================================================//
/*!
 * @brief Computes the moment tensors in the grid search.
 *        The grid search ordering is:
 *         Loop on magnitudes
 *          Loop on colatitudes
 *           Loop on longitudes
 *            Loop on strikes
 *             Loop on slips
 *              Loop on dips
 *
 * @param[in] comm     MPI communicator on which moment tensor will be split
 * @param[in] ng       number of longitudes
 * @param[in] gammas   longitudes (radians) on lune s.t.
 *                     \f$ \gamma \in [-\pi/6, \pi/6] \f$ [ng]
 * @param[in] nb       number of colatitudes
 * @param[in] betas    colatitudes (radians) on lune s.t.
 *                     \f$ \beta \in [0, \pi] \f$ [nb]
 * @param[in] nm       number of scalar moments
 * @param[in] M0s      scalar moments in Newton-meters [nm].
 *                     for a `unit' moment tensor choose M0 = 1/sqrt(2)
 * @param[in] nk       number of strike angles
 * @param[in] kappas   strike angles (radians) s.t.
 *                     \f$ \kappa \in [0, 2\pi] \f$ [nk]
 * @param[in] nt       number of dip angles
 * @param[in] thetas   dips angles (radians) s.t.
* \f$ \theta \in [0, \pi/2] \f$ [nt] * @param[in] ns number of slip angles * @param[in] sigmas slip angles (radians) s.t. * \f$ \sigma \in [-\pi/2, \pi/2] \f$ [ns] * @param[in] ldm leading dimension of mts. must be >= 6. * @param[in] nmt number of moment tensors (=nm*nb*ng*nk*ns*nt) * * @param[out] mts moment tensors (Newton-meters) in north,east,down s.t. * \f$ \textbf{m} * = \f$ \{m_{xx}, m_{yy}, m_{zz}, * m_{xy}, m_{xz}, m_{yz}\} \f$. * This has dimensions [nm*nb*ng*nk*ns*nt x ldm]. Observe * the ordering as defind above. * * @result 0 indicates success * * @author Ben Baker * * @copyright ISTI licensed under Apached 2 * */ int parmt_discretizeMT64f_MPI(const MPI_Comm comm, const int ng, const double *__restrict__ gammas, const int nb, const double *__restrict__ betas, const int nm, const double *__restrict__ M0s, const int nk, const double *__restrict__ kappas, const int nt, const double *__restrict__ thetas, const int ns, const double *__restrict__ sigmas, const int ldm, struct localMT_struct *mts) { const char *fcnm = "parmt_discretizeMT64f_MPI\0"; double *mtWork; int64_t nmt64; int *displ, *nmtProc, *offset, *sendCounts; int dmt, i, ierr, imt1, imt2, myid, nmtall, nmt, nprocs; const int master = 0; //------------------------------------------------------------------------// ierr = 0; MPI_Comm_size(comm, &nprocs); MPI_Comm_rank(comm, &myid); memset(mts, 0, sizeof(struct localMT_struct)); mts->comm = comm; // Check for integer overflow nmt64 = (int64_t) ng * (int64_t) nb * (int64_t) nm *(int64_t) nk * (int64_t) nt * (int64_t) ns; if (nmt64 > INT_MAX) { printf("%s: Error integer overflow - gridsearch too large\n", fcnm); return -1; } nmt = (int) nmt64; // Verify the inputs if (ldm < 6 || ng < 1 || nb < 1 || nm < 1 || nk < 1 || nt < 1 || ns < 1 || gammas == NULL || betas == NULL || M0s == NULL || kappas == NULL || thetas == NULL || sigmas == NULL) { if (ldm < 6){printf("%s: Invalid leading dimension\n", fcnm);} if (ng < 1){printf("%s: ng must be positive\n", 
fcnm);} if (nb < 1){printf("%s: nb must be positive\n", fcnm);} if (nm < 1){printf("%s: nm must be positive\n", fcnm);} if (nk < 1){printf("%s: nk must be positive\n", fcnm);} if (nt < 1){printf("%s: nt must be positive\n", fcnm);} if (ns < 1){printf("%s: ns must be positive\n", fcnm);} if (gammas == NULL){printf("%s: gammas is NULL\n", fcnm);} if (betas == NULL){printf("%s: betas is NULL\n", fcnm);} if (M0s == NULL){printf("%s: M0s is NULL\n", fcnm);} if (kappas == NULL){printf("%s: kappas is NULL\n", fcnm);} if (thetas == NULL){printf("%s: thetas is NULL\n", fcnm);} if (sigmas == NULL){printf("%s: sigmas is NULL\n", fcnm);} return -1; } // divide the moment tensor grid dmt = MAX(nmt/nprocs, 1); imt1 = myid*dmt; imt2 = (myid + 1)*dmt; if (myid == nprocs - 1){imt2 = nmt;} mts->nmt = imt2 - imt1; mts->ldm = ldm; MPI_Allreduce(&mts->nmt, &nmtall, 1, MPI_INTEGER, MPI_SUM, mts->comm); if (nmtall != nmt) { if (myid == master) { printf("%s: Failed to partition domain %d %d\n", fcnm, nmtall, nmt); } return -1; } // create the requisite information for an MPI_GATHER mts->nmtProc = memory_calloc32i(nprocs); mts->offset = memory_calloc32i(nprocs); nmtProc = memory_calloc32i(nprocs); offset = memory_calloc32i(nprocs); nmtProc[myid] = mts->nmt; offset[myid] = imt1; MPI_Allreduce(nmtProc, mts->nmtProc, nprocs, MPI_INTEGER, MPI_SUM, mts->comm); MPI_Allreduce(offset, mts->offset, nprocs, MPI_INTEGER, MPI_SUM, mts->comm); mts->commSize = nprocs; mts->nmtAll = nmtall; mts->myid = myid; memory_free32i(&nmtProc); memory_free32i(&offset); // have master process compute all mts and scatter them // TODO this should be parallel if (mts->myid == master) { mtWork = memory_calloc64f(mts->ldm*mts->nmtAll); ierr = parmt_discretizeMT64f(ng, gammas, nb, betas, nm, M0s, nk, kappas, nt, thetas, ns, sigmas, mts->ldm, mts->nmtAll, mtWork); } else { mtWork = memory_calloc64f(1); } MPI_Bcast(&ierr, 1, MPI_INT, master, mts->comm); if (mts->commSize > 1) { // figure out who gets what part of the moment 
tensor space sendCounts = memory_calloc32i(mts->commSize); displ = memory_calloc32i(mts->commSize); for (i=0; i<mts->commSize; i++) { sendCounts[i] = mts->nmtProc[i]*mts->ldm; displ[i] = mts->ldm*mts->offset[i]; } // set the memory and distribute it mts->mts = memory_calloc64f(mts->ldm*MAX(1,mts->nmt)); MPI_Scatterv(mtWork, sendCounts, displ, MPI_DOUBLE, mts->mts, mts->ldm*mts->nmt, MPI_DOUBLE, master, mts->comm); memory_free64f(&mtWork); } else { mts->mts = mtWork; } return ierr; } //============================================================================// /*! * @brief Computes the moment tensors in the grid search. * The grid search ordering is: * Loop on magnitudes * Loop on colatitudes * Loop on longitudes * Loop on strikes * Loop on slips * Loop on dips * * @param[in] ng number of longitudes * @param[in] gammas longitudes (radians) on lune s.t. * \f$ \gamma \in [-\pi/6, \pi/6] \f$ [ng] * @param[in] nb number of colatitudes * @param[in] betas colatitudes (radians) on lune s.t. * \f$ \beta \in [0, \pi] \f$ [nb] * @param[in] nm number of scalar moments * @param[in] M0s scalar moments in Newton-meters [nm]. * for a `unit' moment tensor choose M0 = 1/sqrt(2) * @param[in] nk number of strike angles * @param[in] kappas strike angles (radians) s.t. * \f$ \kappa \in [0, 2\pi] \f$ [nk] * @param[in] nt number of dip angles * @param[in] thetas dips angles (radians) s.t. * \f$ \theta \in [0, \pi/2] \f$ [nt] * @param[in] ns number of slip angles * @param[in] sigmas slip angles (radians) s.t. * \f$ \sigma \in [-\pi/2, \pi/2] \f$ [ns] * @param[in] ldm leading dimension of mts. must be >= 6. * @param[in] nmt number of moment tensors (=nm*nb*ng*nk*ns*nt) * * @param[out] mts moment tensors (Newton-meters) in north,east,down s.t. * \f$ \textbf{m} * = \f$ \{m_{xx}, m_{yy}, m_{zz}, * m_{xy}, m_{xz}, m_{yz}\} \f$. * This has dimensions [nm*nb*ng*nk*ns*nt x ldm]. Observe * the ordering as defind above. 
 *
 * @result 0 indicates success
 *
 * @author Ben Baker
 *
 * @copyright ISTI licensed under Apache 2
 *
 */
int parmt_discretizeMT64f(const int ng, const double *__restrict__ gammas,
                          const int nb, const double *__restrict__ betas,
                          const int nm, const double *__restrict__ M0s,
                          const int nk, const double *__restrict__ kappas,
                          const int nt, const double *__restrict__ thetas,
                          const int ns, const double *__restrict__ sigmas,
                          const int ldm, const int nmt,
                          double *__restrict__ mts)
{
    const char *fcnm = "parmt_discretizeMT64f\0";
    double lam[3], Muse[6], U[9] __attribute__ ((aligned (64)));
    double *mtWork, deltaDeg, gammaDeg, kappaDeg, sigmaDeg, thetaDeg;
    int i, ierr, ierr1, ib, ig, indx, im, imt, ik, is, it, nmtBase;
    const double M01[1] = {1.0};  // unit scalar moment; M0 scaling applied later
    const double pi180i = 180.0/M_PI;  // radians -> degrees
    // Admissible ranges for the lune/angle parameterization
    const double betaMin = 0.0;
    const double betaMax = M_PI;
    const double gammaMin =-M_PI/6.0;
    const double gammaMax = M_PI/6.0;
    const double kappaMin = 0.0;
    const double kappaMax = 2.0*M_PI;
    const double thetaMin = 0.0;
    const double thetaMax = M_PI_2;
    const double sigmaMin =-M_PI_2;
    const double sigmaMax = M_PI_2;
    //const double sqrt2i = 1.0/sqrt(2.0); // unit magnitude MT
    //------------------------------------------------------------------------//
    ierr = 0;
    // Verify the inputs
    if (ldm < 6 || ng < 1 || nb < 1 || nm < 1 || nk < 1 || nt < 1 || ns < 1 ||
        nmt != ng*nb*nm*nk*nt*ns ||
        gammas == NULL || betas == NULL || M0s == NULL || kappas == NULL ||
        thetas == NULL || sigmas == NULL)
    {
        if (ldm < 6){printf("%s: Invalid leading dimension\n", fcnm);}
        if (ng < 1){printf("%s: ng must be positive\n", fcnm);}
        if (nb < 1){printf("%s: nb must be positive\n", fcnm);}
        if (nm < 1){printf("%s: nm must be positive\n", fcnm);}
        if (nk < 1){printf("%s: nk must be positive\n", fcnm);}
        if (nt < 1){printf("%s: nt must be positive\n", fcnm);}
        if (ns < 1){printf("%s: ns must be positive\n", fcnm);}
        if (nmt != ng*nb*nm*nk*nt*ns)
        {
            printf("%s: nmt != ng*nb*nm*nk*nt*ns %d %d\n",
                   fcnm, nmt, ng*nb*nm*nk*nt*ns);
        }
        if (gammas == NULL){printf("%s: gammas is NULL\n", fcnm);}
        if (betas == NULL){printf("%s: betas is NULL\n", fcnm);}
        if (M0s == NULL){printf("%s: M0s is NULL\n", fcnm);}
        if (kappas == NULL){printf("%s: kappas is NULL\n", fcnm);}
        if (thetas == NULL){printf("%s: thetas is NULL\n", fcnm);}
        if (sigmas == NULL){printf("%s: sigmas is NULL\n", fcnm);}
        return -1;
    }
    // Range-check every abscissa before doing any work
    for (i=0; i<ng; i++)
    {
        if (gammas[i] < gammaMin || gammas[i] > gammaMax)
        {
            printf("%s: gammas %f out or range [%f,%f]\n",
                   fcnm, gammas[i], gammaMin, gammaMax);
            return -1;
        }
    }
    for (i=0; i<nb; i++)
    {
        if (betas[i] < betaMin || betas[i] > betaMax)
        {
            printf("%s: betas %f out or range [%f,%f]\n",
                   fcnm, betas[i], betaMin, betaMax);
            return -1;
        }
    }
    for (i=0; i<nk; i++)
    {
        if (kappas[i] < kappaMin || kappas[i] > kappaMax)
        {
            printf("%s: kappas %f out or range [%f,%f]\n",
                   fcnm, kappas[i], kappaMin, kappaMax);
            return -1;
        }
    }
    for (i=0; i<nt; i++)
    {
        if (thetas[i] < thetaMin || thetas[i] > thetaMax)
        {
            printf("%s: thetas %f out or range [%f,%f]\n",
                   fcnm, thetas[i], thetaMin, thetaMax);
            return -1;
        }
    }
    for (i=0; i<ns; i++)
    {
        if (sigmas[i] < sigmaMin || sigmas[i] > sigmaMax)
        {
            printf("%s: sigmas %f out or range [%f,%f]\n",
                   fcnm, sigmas[i], sigmaMin, sigmaMax);
            return -1;
        }
    }
    // The moment tensors are equivalent up to a scaling factor - so compute
    // the unit moment tensor
    nmtBase = nb*ng*nk*ns*nt;
    if (nm == 1)
    {
        // Single magnitude: write straight into the output, scale in place.
        mtWork = mts;
    }
    else
    {
        mtWork = memory_calloc64f(nmtBase*ldm);
    }
#ifdef _OPENMP
    #pragma omp parallel for collapse(5) \
     firstprivate (lam, M01, Muse, U) \
     private (deltaDeg, gammaDeg, kappaDeg, \
              sigmaDeg, thetaDeg, ierr1, imt, ib, ig, ik, is, it) \
     shared (fcnm, M0s, betas, gammas, kappas, sigmas, mtWork, thetas) \
     reduction (max:ierr) \
     default (none)
#endif
    // Loop on colatitudes
    for (ib=0; ib<nb; ib++)
    {
        // Loop on longitudes
        for (ig=0; ig<ng; ig++)
        {
            // Loop on strike
            for (ik=0; ik<nk; ik++)
            {
                // Loop on slip
                for (is=0; is<ns; is++)
                {
                    // Loop on dip
                    for (it=0; it<nt; it++)
                    {
                        // tape**2 space term
                        //M0 = sqrt2i;
                        // delta (dip from horizontal) = pi/2 - beta; the
                        // compearth API wants degrees.
                        deltaDeg = (M_PI/2.0 - betas[ib])*pi180i;
                        gammaDeg = gammas[ig]*pi180i;
                        kappaDeg = kappas[ik]*pi180i;
                        sigmaDeg = sigmas[is]*pi180i;
                        thetaDeg = thetas[it]*pi180i;
                        // index (row-major over the 5 nested loops)
                        imt = ib*ng*nk*ns*nt
                            + ig*nk*ns*nt
                            + ik*ns*nt
                            + is*nt
                            + it;
                        // Compute the corresponding moment tensor.
                        // Silver and Jordan compute M0 = 1/sqrt(2)norm(M)
                        // hence, if it is the scalar moment computation
                        // that handles the 1/sqrt(2) which means I should
                        // use unity here.  This has been verified b/c
                        // apply CMT2m0 on this moment tensor will yield
                        // the input M0.
                        ierr1 = compearth_TT2CMT(1, &gammaDeg, &deltaDeg,
                                                 M01,
                                                 &kappaDeg, &thetaDeg,
                                                 &sigmaDeg,
                                                 Muse, lam, U);
/*
                        ierr1 = compearth_tt2cmt(gammaDeg, deltaDeg, 1.0, //sqrt2i,
                                                 kappaDeg, thetaDeg, sigmaDeg,
                                                 Muse, lam, U);
*/
                        // Convert from USE to our NED estimation basis
                        ierr1 = compearth_convertMT(1, CE_USE, CE_NED, Muse,
                                                    &mtWork[imt*ldm]);
                        if (ierr1 != 0)
                        {
                            printf("%s: Error changing coords\n", fcnm);
                            ierr = ierr + 1;
                            continue;
                        }
                    } // loop on dip
                } // loop on slip
            } // loop on strike
        } // loop on longitudes
    } // loop on latitudes
    if (ierr != 0)
    {
        printf("%s: Errors during mt computation\n", fcnm);
        ierr = 1;
    }
    // Now copy all moment tensors over, scaling each block by its magnitude
    if (nm == 1)
    {
        cblas_dscal(ldm*nmtBase, M0s[0], mtWork, 1);
    }
    else
    {
        for (im=0; im<nm; im++)
        {
            indx = im*nmtBase*ldm; //nb*ng*nk*ns*nt;
            for (imt=0; imt<ldm*nmtBase; imt++)
            {
                mts[indx+imt] = M0s[im]*mtWork[imt];
            }
        }
        memory_free64f(&mtWork);
    }
    mtWork = NULL;
    return ierr;
}
GB_binop__plus_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__plus_uint8 // A.*B function (eWiseMult): GB_AemultB__plus_uint8 // A*D function (colscale): GB_AxD__plus_uint8 // D*A function (rowscale): GB_DxB__plus_uint8 // C+=B function (dense accum): GB_Cdense_accumB__plus_uint8 // C+=b function (dense accum): GB_Cdense_accumb__plus_uint8 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_uint8 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_uint8 // C=scalar+B GB_bind1st__plus_uint8 // C=scalar+B' GB_bind1st_tran__plus_uint8 // C=A+scalar GB_bind2nd__plus_uint8 // C=A'+scalar GB_bind2nd_tran__plus_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ 
uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x + y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_UINT8 || GxB_NO_PLUS_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE: unlike the kernels below, this one returns void and has no
// GB_DISABLE guard.
void GB_Cdense_ewise3_accum__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__plus_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the ek_slice workspaces allocated by the add/emult templates.
#undef GB_FREE_ALL
#define GB_FREE_ALL                                                       \
{                                                                         \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;    \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;    \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;    \
}

GrB_Info GB_AaddB__plus_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__plus_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__plus_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = Bx [p] ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__plus_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = Ax [p] ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint8_t aij = Ax [pA] ;         \
    Cx [pC] = (x + aij) ;           \
}

GrB_Info GB_bind1st_tran__plus_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of the file
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint8_t aij = Ax [pA] ;         \
    Cx [pC] = (aij + y) ;           \
}

GrB_Info GB_bind2nd_tran__plus_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
outer_mult.h
#include "CSC.h" #include "CSR.h" #include "Triple.h" #include "radix_sort/radix_sort.hpp" #include "utility.h" #include <algorithm> #include <iostream> #include <omp.h> #include <unistd.h> #include <cstring> using namespace std; static uint32_t nrows_per_blocker; static uint32_t ncols_per_blocker; static uint32_t ncols_of_A; template <typename IT> uint16_t fast_mod(const IT input, const int ceil) { return input >= ceil ? input % ceil : input; } template <typename IT, typename NT> uint64_t getFlop(const CSC<IT, NT>& A, const CSR<IT, NT>& B) { uint64_t flop = 0; #pragma omp parallel for reduction(+ : flop) for (IT i = 0; i < A.cols; ++i) { IT colnnz = A.colptr[i + 1] - A.colptr[i]; IT rownnz = B.rowptr[i + 1] - B.rowptr[i]; flop += (colnnz * rownnz); } return flop; } // testing different read bandwidths template <typename IT, typename NT> uint64_t ReadBW(const CSC<IT, NT>& A, const CSR<IT, NT>& B) { NT flop; double start = omp_get_wtime(); for(int it = 0; it<10; it++) { flop = 0; #pragma omp parallel for reduction(+ : flop) for (IT i = 0; i < A.nnz; ++i) { flop += A.rowids[i]; } } double end = omp_get_wtime(); double bytes = A.nnz * sizeof(IT); double readbw = bytes / (1000000000 * (end-start)/10); cout << "Read Bandwidth (reading rowids) = " << readbw << " GB/s" << endl; cout << "dummy sum:" << flop << endl; /* flop = 0; start = omp_get_wtime(); #pragma omp parallel for reduction(+ : flop) for (IT i = 0; i < A.nnz; ++i) { flop ++; } end = omp_get_wtime(); cout << "time = " << 1000* (end-start) << " flop" << flop << endl; flop = 0; start = omp_get_wtime(); #pragma omp parallel for reduction(+ : flop) for (IT i = 0; i < A.nnz; ++i) { for(IT j=0; j<1; j++) { flop ++; } } end = omp_get_wtime(); cout << "time = " << 1000*(end-start) << " flop" << flop<< endl; */ start = omp_get_wtime(); for(int it = 0; it<10; it++) { flop = 0; #pragma omp parallel for reduction(+ : flop) for (IT i = 0; i < A.cols; i++) { for(IT j=A.colptr[i]; j<A.colptr[i + 1]; j++) { // IT rowid = 
A.rowids[j]; // for(IT k=B.rowptr[i]; k<B.rowptr[i+1]; k++) // { // IT colid = B.colids[k]; flop+= A.rowids[j]; // } } } } end = omp_get_wtime(); bytes = (A.nnz) * sizeof(IT) + (A.cols) * sizeof(IT); readbw = bytes / (1000000000 * (end-start)/10); cout << "Read Bandwidth (reading rowids via colptr) = " << readbw << " GB/s" << endl; cout << "dummy sum:" << flop << endl; /* #pragma omp parallel for reduction(+ : flop) for (IT i = 0; i < A.cols; i+=8) { for(int kk=0; kk<8; kk++) { int nA = A.colptr[i+1] - A.colptr[i]; int nB = B.rowptr[i+1] - B.rowptr[i]; if(nA >=4 && nB>=4) { for(IT j=A.colptr[i+kk]; j<A.colptr[i+kk+1]; j++) { IT rowid = A.rowids[j]; for(IT k=B.rowptr[i+kk]; k<B.rowptr[i+kk+1]; k++) { IT colid = B.colids[k]; flop+= (rowid+colid); } } } } */ start = omp_get_wtime(); for(int it = 0; it<10; it++) { flop = 0; #pragma omp parallel for reduction(+ : flop) for (IT i = 0; i < A.cols; i++) { //IT colid = i; for(IT j=A.colptr[i]; j<A.colptr[i + 1]; j++) { IT rowid = A.rowids[j]; for(IT k=B.rowptr[i]; k<B.rowptr[i+1]; k++) { IT colid = B.colids[k]; flop+= (rowid+colid); } } } } end = omp_get_wtime(); bytes = (A.nnz + B.nnz) * sizeof(IT) + (A.cols + B.rows) * sizeof(IT); readbw = bytes / (1000000000 * (end-start)/10); cout << "Read Bandwidth (both A and B) = " << readbw << " GB/s" << endl; cout << "dummy sum:" << flop << endl; return flop; } template <typename IT, typename NT> void do_symbolic(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx, uint16_t nrows_per_blocker, uint16_t ncols_per_blocker, uint16_t num_blockers, IT* flop_groupby_row_blockers, IT* flop_groupby_col_blockers, IT& total_flop) { #pragma omp parallel for reduction(+ : flop_groupby_row_blockers[:num_blockers]) reduction(+ : flop_groupby_col_blockers[:num_blockers*num_blockers]) for (IT i = startIdx; i < endIdx; ++i) { IT rownnz = B.rowptr[i + 1] - B.rowptr[i]; for (IT j = A.colptr[i]; j < A.colptr[i + 1]; ++j) { uint16_t row_blocker_id = A.rowids[j] / nrows_per_blocker; 
uint16_t col_blocker_id = fast_mod(A.rowids[j], nrows_per_blocker) / ncols_per_blocker; flop_groupby_row_blockers[row_blocker_id] += rownnz; flop_groupby_col_blockers[row_blocker_id * num_blockers + col_blocker_id] += rownnz; } } for (IT i = 0; i < num_blockers; ++i) { total_flop += flop_groupby_row_blockers[i]; } } template <typename IT, typename NT> bool compareTuple (tuple<IT, IT, NT> t1, tuple<IT, IT, NT> t2) { if (std::get<1>(t1) != std::get<1>(t2)) return false; if (std::get<0>(t1) != std::get<0>(t2)) return false; return true; } template <typename IT, typename NT> int64_t getReqMemory(const CSC<IT, NT>& A, const CSR<IT, NT>& B) { uint64_t flop = getFlop(A, B); return flop * sizeof(int64_t); } struct ExtractKey { inline int64_t operator()(tuple<int32_t, int32_t, double> tup) { int64_t res = std::get<0>(tup); res = (res << 32); res = res | (int64_t)(uint32_t) std::get<1>(tup); return res; } }; struct ExtractKey2 { inline uint32_t operator()(tuple<int32_t, int32_t, double> tup) { return (((fast_mod(fast_mod(std::get<0>(tup), 32768), 128)) << 24) | (uint32_t) std::get<1>(tup)); } }; template <typename IT, typename NT> void doRadixSort(tuple<IT, IT, NT>* begin, tuple<IT, IT, NT>* end, tuple<IT, IT, NT>* buffer) { radix_sort(begin, end, buffer, ExtractKey2()); } template <typename IT, typename NT> IT doMerge(tuple<IT, IT, NT>* vec, IT length) { if (length == 0) return 0; IT i = 0; IT j = 1; while (i < length && j < length) { if (j < length && compareTuple(vec[i], vec[j])) std::get<2>(vec[i]) += std::get<2>(vec[j]); else vec[++i] = std::move(vec[j]); ++j; } return i + 1; } template <typename IT> void initializeBlockerBoundary(IT* nums_per_col_blocker, uint16_t num_blockers, IT* blocker_begin_ptr, IT* blocker_end_ptr) { blocker_begin_ptr[0] = 0; blocker_end_ptr[0] = 0; for (uint16_t blocker_index = 1; blocker_index < num_blockers; ++blocker_index) { blocker_begin_ptr[blocker_index] = blocker_begin_ptr[blocker_index - 1] + nums_per_col_blocker[blocker_index - 1]; 
blocker_end_ptr[blocker_index] = blocker_begin_ptr[blocker_index]; } } template <typename IT, typename NT> void OuterSpGEMM_stage(const CSC<IT, NT>& A, const CSR<IT, NT>& B, IT startIdx, IT endIdx, CSR<IT, NT>& C, \ int nblockers, int nblockchars) { typedef tuple<IT, IT, NT> TripleNode; const uint16_t nthreads = omp_get_max_threads(); const uint16_t num_blockers = nblockers; const uint16_t block_width = nblockchars; ncols_of_A = A.cols; nrows_per_blocker = A.rows <= num_blockers * 64 ? 64 : (A.rows + num_blockers - 1) / num_blockers; ncols_per_blocker = nrows_per_blocker <= (num_blockers - 1) * 2 ? 2 : (nrows_per_blocker + num_blockers - 1) / num_blockers; IT total_nnz = 0; IT total_flop = 0; IT* row_blocker_begin_ptr = new IT[num_blockers](); IT* row_blocker_end_ptr = new IT[num_blockers](); IT* flop_groupby_row_blockers = new IT[num_blockers](); IT* nnz_by_row = new IT[A.rows](); IT* flop_groupby_col_blockers = new IT[num_blockers * num_blockers](); do_symbolic(A, B, 0, A.rows, nrows_per_blocker, ncols_per_blocker, num_blockers, flop_groupby_row_blockers, flop_groupby_col_blockers, total_flop); TripleNode* global_blockers = static_cast<TripleNode*>(operator new(sizeof(TripleNode[total_flop]))); // calc prefix sum initializeBlockerBoundary(flop_groupby_row_blockers, num_blockers, row_blocker_begin_ptr, row_blocker_end_ptr); TripleNode* local_blockers = static_cast<TripleNode*> \ (operator new(sizeof(TripleNode[block_width * num_blockers * nthreads]))); IT* size_of_local_blockers = new IT[num_blockers * nthreads](); #pragma omp parallel { uint16_t thread_id = omp_get_thread_num(); // computing phase #pragma omp for for (IT idx = startIdx; idx < endIdx; ++idx) for (IT j = A.colptr[idx]; j < A.colptr[idx + 1]; ++j) // ncols(A) * 4 { IT rowid = A.rowids[j]; // nnz(A) * 4 uint16_t row_blocker_index = rowid / nrows_per_blocker; IT local_blocker_size_offset = thread_id * num_blockers + row_blocker_index; IT local_blocker_offset = local_blocker_size_offset * block_width; 
for (IT k = B.rowptr[idx]; k < B.rowptr[idx + 1]; ++k) // nrows(B) * 4 { local_blockers[local_blocker_offset + size_of_local_blockers[local_blocker_size_offset]++] = std::move(TripleNode(rowid, B.colids[k], A.values[j] * B.values[k])); // flop * (4 + 4 + 8 + 8) if (size_of_local_blockers[local_blocker_size_offset] == block_width) // flop * 16 { std::memcpy( global_blockers + __sync_fetch_and_add(&row_blocker_end_ptr[row_blocker_index], block_width), local_blockers + local_blocker_offset, block_width * sizeof(TripleNode) ); size_of_local_blockers[local_blocker_size_offset] = 0; } } } for (uint16_t row_blocker_index = 0; row_blocker_index < num_blockers; row_blocker_index++) { IT local_blocker_size_offset = thread_id * num_blockers + row_blocker_index; IT local_blocker_offset = local_blocker_size_offset * block_width; std::memcpy( global_blockers + __sync_fetch_and_add(&row_blocker_end_ptr[row_blocker_index], size_of_local_blockers[local_blocker_size_offset]), local_blockers + local_blocker_offset, size_of_local_blockers[local_blocker_size_offset] * sizeof(TripleNode) ); size_of_local_blockers[local_blocker_size_offset] = 0; } } vector<TripleNode*> flop_space = vector<TripleNode*>(num_blockers); for (uint16_t row_blocker_index = 0; row_blocker_index < num_blockers; ++row_blocker_index) flop_space[row_blocker_index] = static_cast<TripleNode*>(operator new(sizeof(TripleNode[flop_groupby_row_blockers[row_blocker_index]]))); IT max_flops_in_col_blockers = 0; IT avg_flops_in_col_blockers = 0; IT* nnz_per_row_blocker = new IT[num_blockers](); IT* nnz_per_col_blocker = static_cast<IT*>(operator new(sizeof(IT[num_blockers * num_blockers]))); IT* col_blocker_begin_ptr = static_cast<IT*>(operator new(sizeof(IT[num_blockers * num_blockers]))); IT* col_blocker_end_ptr = static_cast<IT*>(operator new(sizeof(IT[num_blockers * num_blockers]))); #pragma omp parallel for reduction(max : max_flops_in_col_blockers) reduction(+ : avg_flops_in_col_blockers) for (IT i = 0; i < 
num_blockers * num_blockers; ++i) { nnz_per_col_blocker[i] = 0; col_blocker_begin_ptr[i] = 0; col_blocker_end_ptr[i] = 0; avg_flops_in_col_blockers += flop_groupby_col_blockers[i]; max_flops_in_col_blockers = max(max_flops_in_col_blockers, flop_groupby_col_blockers[i]); } TripleNode* sorting_buffer = static_cast<TripleNode*>(operator new(sizeof(TripleNode[max_flops_in_col_blockers * nthreads + 1]))); // each thread handle a row partition #pragma omp parallel { uint16_t thread_id = omp_get_thread_num(); #pragma omp for for (uint16_t row_blocker_index = 0; row_blocker_index < num_blockers; ++row_blocker_index) { IT row_base_index = row_blocker_index * num_blockers; initializeBlockerBoundary(flop_groupby_col_blockers + row_blocker_index * num_blockers, num_blockers, col_blocker_begin_ptr + row_base_index, col_blocker_end_ptr + row_base_index); for (IT rowptr = row_blocker_begin_ptr[row_blocker_index]; rowptr < row_blocker_end_ptr[row_blocker_index]; ++rowptr) { uint16_t col_blocker_index = fast_mod(std::get<0>(global_blockers[rowptr]), nrows_per_blocker) / ncols_per_blocker; IT local_blocker_size_offset = thread_id * num_blockers + col_blocker_index; IT local_blocker_offset = local_blocker_size_offset * block_width; local_blockers[local_blocker_offset + size_of_local_blockers[local_blocker_size_offset]++] = std::move(global_blockers[rowptr]); if (size_of_local_blockers[local_blocker_size_offset] == block_width) { std::memcpy( flop_space[row_blocker_index] + (col_blocker_end_ptr + row_base_index)[col_blocker_index], local_blockers + local_blocker_offset, block_width * sizeof(TripleNode) ); (col_blocker_end_ptr + row_base_index)[col_blocker_index] += block_width; size_of_local_blockers[local_blocker_size_offset] = 0; } } for (uint16_t col_blocker_index = 0; col_blocker_index < num_blockers; col_blocker_index++) { IT local_blocker_size_offset = thread_id * num_blockers + col_blocker_index; IT local_blocker_offset = local_blocker_size_offset * block_width; std::memcpy( 
flop_space[row_blocker_index] + (col_blocker_end_ptr + row_base_index)[col_blocker_index], local_blockers + local_blocker_offset, sizeof(TripleNode) * size_of_local_blockers[local_blocker_size_offset] ); (col_blocker_end_ptr + row_base_index)[col_blocker_index] += size_of_local_blockers[local_blocker_size_offset]; size_of_local_blockers[local_blocker_size_offset] = 0; } } } #pragma omp parallel { uint16_t thread_id = omp_get_thread_num(); #pragma omp for for (uint16_t row_blocker_index = 0; row_blocker_index < num_blockers; ++row_blocker_index) { IT row_base_index = row_blocker_index * num_blockers; for (uint16_t col_blocker_index = 0; col_blocker_index < num_blockers; col_blocker_index++) { doRadixSort(flop_space[row_blocker_index] + (col_blocker_begin_ptr + row_base_index)[col_blocker_index], flop_space[row_blocker_index] + (col_blocker_end_ptr + row_base_index)[col_blocker_index], sorting_buffer + thread_id * max_flops_in_col_blockers); } } } #pragma omp parallel { uint16_t thread_id = omp_get_thread_num(); #pragma omp for for (uint16_t row_blocker_index = 0; row_blocker_index < num_blockers; ++row_blocker_index) { IT row_base_index = row_blocker_index * num_blockers; for (uint16_t col_blocker_index = 0; col_blocker_index < num_blockers; col_blocker_index++) { IT before = (col_blocker_end_ptr + row_base_index)[col_blocker_index] - (col_blocker_begin_ptr + row_base_index)[col_blocker_index]; IT after = doMerge(flop_space[row_blocker_index] + (col_blocker_begin_ptr + row_base_index)[col_blocker_index], before); // col_blocker_end_ptr[col_blocker_index] = col_blocker_begin_ptr[col_blocker_index] + after; nnz_per_row_blocker[row_blocker_index] += after; nnz_per_col_blocker[row_blocker_index * num_blockers + col_blocker_index] = after; __sync_fetch_and_add(&total_nnz, after); } } // outer-most row-wise for loop } // outer-most parellel block IT *cumulative_colid_indices = new IT[num_blockers * num_blockers + 1](); IT *cumulative_col_blocker_indices = new 
IT[num_blockers * num_blockers + 1](); scan(nnz_per_col_blocker, cumulative_colid_indices, (IT)(num_blockers * num_blockers)); if (C.isEmpty()) { C.make_empty(); } C.rows = A.rows; C.cols = B.cols; C.colids = static_cast<IT*>(operator new(sizeof(IT[total_nnz]))); C.values = static_cast<NT*>(operator new(sizeof(NT[total_nnz]))); C.rowptr = static_cast<IT*>(operator new(sizeof(IT[C.rows + 1]))); C.rowptr[0] = 0; #pragma omp parallel for for (uint16_t row_blocker_index = 0; row_blocker_index < num_blockers; ++row_blocker_index) for (uint16_t col_blocker_index = 0; col_blocker_index < num_blockers; col_blocker_index++) { scan(flop_groupby_col_blockers + row_blocker_index * num_blockers, cumulative_col_blocker_indices + row_blocker_index * num_blockers, (IT)(num_blockers)); IT base = cumulative_colid_indices[row_blocker_index * num_blockers + col_blocker_index]; auto space_addr = flop_space[row_blocker_index] + cumulative_col_blocker_indices[row_blocker_index * num_blockers + col_blocker_index]; for (IT index = 0; index < nnz_per_col_blocker[row_blocker_index * num_blockers + col_blocker_index]; ++index) { ++nnz_by_row[std::get<0>(space_addr[index])]; C.colids[base + index] = std::get<1>(space_addr[index]); C.values[base + index] = std::get<2>(space_addr[index]); } } scan(nnz_by_row, C.rowptr, C.rows + 1); C.nnz = total_nnz; my_free<TripleNode>(global_blockers); my_free<TripleNode>(local_blockers); my_free<IT>(size_of_local_blockers); my_free<IT>(row_blocker_begin_ptr); my_free<IT>(row_blocker_end_ptr); my_free<IT>(flop_groupby_row_blockers); my_free<IT>(flop_groupby_col_blockers); my_free<IT>(nnz_by_row); my_free<IT>(nnz_per_row_blocker); my_free<IT>(nnz_per_col_blocker); my_free<IT>(col_blocker_begin_ptr); my_free<IT>(col_blocker_end_ptr); my_free<IT>(cumulative_colid_indices); my_free<IT>(cumulative_col_blocker_indices); for (uint16_t row_blocker_index = 0; row_blocker_index < num_blockers; ++row_blocker_index) my_free<TripleNode>(flop_space[row_blocker_index]); } 
template <typename IT, typename NT>
// Convenience wrapper: runs the whole blocked outer-product SpGEMM pipeline
// (symbolic count, blocked accumulation, sort, merge, CSR assembly) over the
// full index range and stores the product A*B into C.
//
// NOTE(review): the endIdx passed here is A.rows, but OuterSpGEMM_stage walks
// A.colptr[idx] over [startIdx, endIdx), i.e. the COLUMN range of A (getFlop
// in this file iterates i < A.cols for the same access pattern). This is only
// correct when A is square; presumably it should be A.cols — confirm with the
// callers before changing.
void OuterSpGEMM(const CSC<IT, NT>& A, const CSR<IT, NT>& B, CSR<IT, NT>& C, int nblockers, int nblockchars)
{
    OuterSpGEMM_stage(A, B, 0, A.rows, C, nblockers, nblockchars);
}
blake2bp-ref.c
/* BLAKE2 reference source code package - reference C implementations Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdint.h> #include <assert.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 4 /* blake2b_init_param defaults to setting the expecting output length from the digest_length parameter block field. In some cases, however, we do not want this, as the output length of these instances is given by inner_length instead. 
*/ static int blake2bp_init_leaf_param( blake2b_state *S, const blake2b_param *P ) { int err = blake2b_init_param(S, P); S->outlen = P->inner_length; return err; } static int blake2bp_init_leaf( blake2b_state *S, size_t outlen, size_t keylen, uint64_t offset ) { blake2b_param P[1]; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store32( &P->node_offset, offset ); store32( &P->xof_length, 0 ); P->node_depth = 0; P->inner_length = BLAKE2B_OUTBYTES; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); int result = blake2bp_init_leaf_param( S, P ); return result; } static int blake2bp_init_root( blake2b_state *S, size_t outlen, size_t keylen ) { blake2b_param P[1]; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; store32( &P->leaf_length, 0 ); store32( &P->node_offset, 0 ); store32( &P->xof_length, 0 ); P->node_depth = 1; P->inner_length = BLAKE2B_OUTBYTES; memset( P->reserved, 0, sizeof( P->reserved ) ); memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); int result = blake2b_init_param( S, P ); return result; } int blake2bp_init( blake2bp_state *S, size_t outlen ) { size_t i; if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; S->outlen = outlen; // root if( blake2bp_init_root( S->R, outlen, 0 ) < 0 ) { return -1; } for( i = 0; i < PARALLELISM_DEGREE; ++i ) { if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) { return -1; } } S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; return 0; } int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen ) { size_t i; if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1; memset( 
S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; S->outlen = outlen; if( blake2bp_init_root( S->R, outlen, keylen ) < 0 ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) { if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) { return -1; } } S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) { blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES ); } secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); } return 0; } int blake2bp_update( blake2bp_state *S, const void *pin, size_t inlen ) { const unsigned char * in = (const unsigned char *)pin; size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; size_t i; if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) { blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES ); } in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE) #else for( i = 0; i < PARALLELISM_DEGREE; ++i ) #endif { #if defined(_OPENMP) size_t i = omp_get_thread_num(); #endif size_t inlen__ = inlen; const unsigned char *in__ = ( const unsigned char * )in; in__ += i * BLAKE2B_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ) { blake2b_update( S->S[i], in__, BLAKE2B_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = left + inlen; return 0; } int blake2bp_final( blake2bp_state *S, void *out, size_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES]; size_t i; if(out == NULL || outlen < S->outlen) { return -1; } for( i = 0; i < PARALLELISM_DEGREE; ++i ) { if( 
S->buflen > i * BLAKE2B_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES; if( left > BLAKE2B_BLOCKBYTES ) { left = BLAKE2B_BLOCKBYTES; } blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left ); } blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES ); } for( i = 0; i < PARALLELISM_DEGREE; ++i ) { blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES ); } return blake2b_final( S->R, out, S->outlen ); } // 実質実行関数(この内部で並列化が走る) int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ) { uint8_t* p = key; uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES]; blake2b_state S[PARALLELISM_DEGREE][1]; blake2b_state FS[1]; size_t i; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if( NULL == key && keylen > 0 ) return -1; if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1; if( keylen > BLAKE2B_KEYBYTES ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) { if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) { return -1; } } S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */ if( keylen > 0 ) { uint8_t block[BLAKE2B_BLOCKBYTES]; memset( block, 0, BLAKE2B_BLOCKBYTES ); memcpy( block, key, keylen ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) { blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES ); } secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) assert(false); #else for( i = 0; i < PARALLELISM_DEGREE; ++i ) #endif { #if defined(_OPENMP) size_t i = omp_get_thread_num(); #endif size_t inlen__ = inlen; const unsigned char *in__ = ( const unsigned char * )in; // 4ブロックに分けた先頭位置へ移動する in__ += i * BLAKE2B_BLOCKBYTES; while (inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES) { blake2b_update( S[i], in__, BLAKE2B_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES; } if( inlen__ > i * 
BLAKE2B_BLOCKBYTES ) { const size_t left = inlen__ - i * BLAKE2B_BLOCKBYTES; const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES; blake2b_update( S[i], in__, len ); } // hash[i] の初めての出力 blake2b_final( S[i], hash[i], BLAKE2B_OUTBYTES ); } if (blake2bp_init_root( FS, outlen, keylen ) < 0) return -1; FS->last_node = 1; /* Mark as last node */ for( i = 0; i < PARALLELISM_DEGREE; ++i ) { blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES ); } return blake2b_final( FS, out, outlen );; } #if defined(BLAKE2BP_SELFTEST) #include <string.h> #include "blake2-kat.h" int main( void ) { uint8_t key[BLAKE2B_KEYBYTES]; uint8_t buf[BLAKE2_KAT_LENGTH]; size_t i, step; for( i = 0; i < BLAKE2B_KEYBYTES; ++i ) { key[i] = (uint8_t)i; } for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) buf[i] = (uint8_t)i; /* Test simple API */ // keyあり BLAKE2_KAT_LENGTH(=256) 回のテスト for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) { uint8_t hash[BLAKE2B_OUTBYTES]; blake2bp( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES ); if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) ) { goto fail; } } /* Test streaming API */ /* for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) { for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) { uint8_t hash[BLAKE2B_OUTBYTES]; blake2bp_state S; uint8_t * p = buf; size_t mlen = i; int err = 0; if( (err = blake2bp_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) { goto fail; } while (mlen >= step) { if ( (err = blake2bp_update(&S, p, step)) < 0 ) { goto fail; } mlen -= step; p += step; } if ( (err = blake2bp_update(&S, p, mlen)) < 0) { goto fail; } if ( (err = blake2bp_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) { goto fail; } if (0 != memcmp(hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES)) { goto fail; } } } */ puts( "ok" ); return 0; fail: puts("error"); return -1; } #endif
model.h
#ifndef _OBJ_H_ #define _OBJ_H_ #include <vector> #include <map> #include <regex> #include <glm.hpp> #include "bmp.h" #include "hdr.h" #include "tools.h" /* load picture, +x = right, +y = up */ class Picture { public: bool empty_ = true; std::string filename_; int w, h; std::vector<glm::dvec3> data_; Picture() {} void load(std::string filename) { std::cout << "[Picture] " << filename << std::endl; filename_ = filename; int i = filename.size() - 3; if (filename.substr(i, 3) == "bmp") load_bmp(filename); else if (filename.substr(i, 3) == "hdr") load_hdr(filename); } void load_bmp(std::string filename) { Bitmap bmp(filename.c_str()); if (bmp.GetBits() != NULL) { w = bmp.GetW(); h = bmp.GetH(); for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { int idx = (h - 1 - y) * w + x; glm::dvec3 color = bmp.GetColor(idx); data_.push_back(color); } } empty_ = false; } } void load_hdr(std::string filename) { HDRLoader hdr(filename.c_str()); if (hdr.success) { std::cout << filename << std::endl; w = hdr.color.width; h = hdr.color.height; for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { int idx = (h - 1 - y) * w + x; glm::dvec3 color = hdr.GetColor(idx); data_.push_back(color); } } empty_ = false; } } inline glm::dvec3 Sample2D(glm::dvec2 uv) { int x1 = uv.x * w; int y1 = uv.y * h; x1 = between(0, w, x1); y1 = between(0, h, y1); return data_[y1 * w + x1]; } }; struct Material { glm::dvec3 Kd = glm::dvec3(1, 1, 1); // diffuse, 反射光线系数,0 表示吸收所有光线. glm::dvec3 Ks = glm::dvec3(0, 0, 0); // specular, 高光反射系数. glm::dvec3 Kr = glm::dvec3(0, 0, 0); // 折射透明度, 0 表示不透明. glm::dvec3 Le = glm::dvec3(0, 0, 0); // 自发光. double kd = 1; // diffuse fraction, Monte Carlo double ks = 0; // specular fraction, Monte Carlo double kr = 0; // reflect fraction, Monte Carlo double Nr = 1; // 物质折射率.. double Ns = 1; // Phong 高光反射参数. Picture Map_Kd; // 纹理贴图. 
}; class Model { public: std::vector<glm::dvec3> vertex_; std::vector<glm::dvec3> norm_; std::vector<glm::dvec2> uv_coord_; // material_[0] is default, no matter .mtl file exists or not std::vector<Material> material_; // 3 line of {vertex_idx, norm_idx, uv_coord_idx, material_idx} std::vector<glm::imat3x4> face_; // idx start from 0 std::map<std::string, int> material_name; // find material idx by name std::string dir_path; // 根目录.. ~Model() {} Model(std::string filename) { std::cout << "[Model] " << filename << std::endl; std::smatch result; // get the path of obj file if (regex_search(filename, result, std::regex("(.*/)[^/]+")) && result.size() == 2) dir_path = result[1]; else dir_path = ""; std::ifstream fs; fs.open(filename); if (!fs.is_open()) { std::cout << "[error] open " << filename << " failed!\n"; return; } std::string str((std::istreambuf_iterator<char>(fs)), std::istreambuf_iterator<char>()); std::vector<std::string> lines; int start = 0; for (int i = 0; i < str.size(); ++i) { if (str[i] == '\n') { lines.push_back(str.substr(start, i - start + 1)); start = i + 1; } } // the first material is always the default material; material_.push_back(Material()); std::vector<std::string> v_; std::vector<std::string> vn_; std::vector<std::string> vt_; std::vector<std::string> f_; for (auto line : lines) { if (line[0] == 'm') // mtllib { std::smatch res; if (regex_search(line, res, std::regex("mtllib\\s+(\\S+)"))) load_material(res[1]); } else if (line[0] == 'v') { if (line[1] == ' ') // v v_.push_back(line); else if (line[1] == 'n') // vn vn_.push_back(line); else if (line[1] == 't') // vt vt_.push_back(line); } else if (line[0] == 'f' && line[1] == ' ') // f { f_.push_back(line); } else if (line[0] == 'u' && line[1] == 's') // usemtl { f_.push_back(line); } } #pragma omp sections { #pragma omp section { for (auto line : v_) { char trash; glm::dvec3 temp; std::istringstream ss(line); ss >> trash >> temp[0] >> temp[1] >> temp[2]; vertex_.push_back(temp); } } 
#pragma omp section { for (auto line : vn_) { char trash; glm::dvec3 temp; std::istringstream ss(line); ss >> trash >> trash >> temp[0] >> temp[1] >> temp[2]; norm_.push_back(temp); } } #pragma omp section { for (auto line : vt_) { char trash; glm::dvec2 temp; std::istringstream ss(line); ss >> trash >> trash >> temp[0] >> temp[1]; uv_coord_.push_back(temp); } } #pragma omp section { int curr_material_id = 0; for (auto line : f_) { if (line[0] == 'u') { std::smatch res; if (regex_search(line, res, std::regex("usemtl\\s+(\\S+)"))) curr_material_id = material_name[std::string(res[1])]; } else { char trash; glm::imat3x4 face; std::istringstream ss(line); ss >> trash; for (int i = 0; i < 3; ++i) { ss >> face[i][0] >> trash >> face[i][1] >> trash >> face[i][2]; face[i][0]--; face[i][1]--; face[i][2]--; face[i][3] = curr_material_id; } face_.push_back(face); } } } } } inline void load_material(std::string filename) { std::ifstream fs; fs.open(dir_path + filename); if (!fs.is_open()) { std::cout << "[error] open " << filename << " failed!\n"; return; } std::string line; std::smatch result; // read .mtl file while (getline(fs, line)) { if (regex_search(line, std::regex("\\s*#")) || regex_match(line, std::regex("\\s*"))) continue; // 'newmtl' find, add a default material to array if (regex_search(line, result, std::regex("newmtl\\s+(\\S+)")) && result.size() == 2) { material_name[std::string(result[1])] = material_.size(); material_.push_back(Material()); continue; } // update material details if (regex_search(line, result, std::regex("\\s*Kd\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)"))) material_.back().Kd = {tofloat(result[1]), tofloat(result[2]), tofloat(result[3])}; else if (regex_search(line, result, std::regex("\\s*Ks\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)"))) material_.back().Ks = {tofloat(result[1]), tofloat(result[2]), tofloat(result[3])}; else if (regex_search(line, result, std::regex("\\s*Kr\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)"))) material_.back().Kr = {tofloat(result[1]), 
tofloat(result[2]), tofloat(result[3])}; else if (regex_search(line, result, std::regex("\\s*Le\\s+(\\S+)\\s+(\\S+)\\s+(\\S+)"))) material_.back().Le = {tofloat(result[1]), tofloat(result[2]), tofloat(result[3])}; else if (regex_search(line, result, std::regex("\\s*Ns\\s+(\\S+)"))) material_.back().Ns = tofloat(result[1]); else if (regex_search(line, result, std::regex("\\s*Nr\\s+(\\S+)"))) material_.back().Nr = tofloat(result[1]); else if (regex_search(line, result, std::regex("map_Kd\\s+(\\S+)"))) material_.back().Map_Kd.load(dir_path + std::string(result[1])); // load picture else continue; } for (Material &i : material_) { double kd = max3(i.Kd.x, i.Kd.y, i.Kd.z); double ks = max3(i.Ks.x, i.Ks.y, i.Ks.z); double kr = max3(i.Kr.x, i.Kr.y, i.Kr.z); double _sum = kd + ks + kr; // kd + ks + kr <= 1; if (_sum > 1) { kd /= _sum; ks /= _sum; kr /= _sum; i.Kd /= _sum; i.Ks /= _sum; i.Kr /= _sum; } // 调整加权系数,使得 Kd * kd + Ks * ks + Kr * kr = (1,1,1) if (kd > 0) i.Kd /= kd; if (ks > 0) i.Ks /= ks; if (kr > 0) i.Kr /= kr; i.kd = kd; i.ks = ks; i.kr = kr; } } }; #endif
bml_copy_ellblock_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_copy.h"
#include "../bml_types.h"
#include "bml_allocate_ellblock.h"
#include "bml_copy_ellblock.h"
#include "bml_types_ellblock.h"

#include <assert.h>
#include <complex.h>
#include <stdlib.h>
#include <string.h>

/** Copy an ellblock matrix - result is a new matrix.
 *
 * \ingroup copy_group
 *
 * \param A The matrix to be copied
 * \return A copy of matrix A.
 */
bml_matrix_ellblock_t *TYPED_FUNC(
    bml_copy_ellblock_new) (
    bml_matrix_ellblock_t * A)
{
    /* Allocate an empty matrix with the same block structure,
     * block sizes, and distribution mode as A. */
    bml_matrix_ellblock_t *B =
        TYPED_FUNC(bml_block_matrix_ellblock) (A->NB, A->MB, A->M,
                                               A->bsize,
                                               A->distribution_mode);
    int NB = A->NB;
    int MB = A->MB;
    int *A_indexb = A->indexb;
    REAL_T **A_ptr_value = (REAL_T **) A->ptr_value;
    int *B_indexb = B->indexb;
    REAL_T **B_ptr_value = (REAL_T **) B->ptr_value;

    /* Copy the per-block-row nonzero counts and the column-block
     * indices wholesale; only the block data needs per-block work. */
    memcpy(B->nnzb, A->nnzb, sizeof(int) * A->NB);
    memcpy(B_indexb, A_indexb, NB * MB * sizeof(int));

    /* Copy the numerical data, one block row per loop iteration. */
#pragma omp parallel for
    for (int ib = 0; ib < NB; ib++)
    {
        assert(B->bsize[ib] > 0);
        /* NOTE(review): hard-coded upper bound of 10 on the block size
         * looks like a debugging leftover — confirm the intended
         * invariant before relying on it. */
        assert(B->bsize[ib] < 10);
        for (int jp = 0; jp < A->nnzb[ib]; jp++)
        {
            int ind = ROWMAJOR(ib, jp, NB, MB);
            int jb = B_indexb[ind];
            assert(jb < NB);
            /* B is freshly allocated above, so no block storage may
             * exist yet at this slot. */
            assert(B_ptr_value[ind] == NULL);
            int nelements = A->bsize[ib] * A->bsize[jb];
            B_ptr_value[ind] =
                TYPED_FUNC(bml_allocate_block_ellblock) (B, ib, nelements);
            assert(B_ptr_value[ind] != NULL);
            memcpy(B_ptr_value[ind], A_ptr_value[ind],
                   nelements * sizeof(REAL_T));
        }
    }
    return B;
}

/** Copy an ellblock matrix.
 *
 * \ingroup copy_group
 *
 * \param A The matrix to be copied
 * \param B Copy of matrix A
 */
void TYPED_FUNC(
    bml_copy_ellblock) (
    bml_matrix_ellblock_t * A,
    bml_matrix_ellblock_t * B)
{
    /* A and B must have the same number of block rows.
     * NOTE(review): MB is not checked here even though the indexb
     * copy below assumes matching NB * MB layout — verify callers
     * guarantee A->MB == B->MB. */
    assert(A->NB == B->NB);

    int NB = A->NB;
    int MB = A->MB;
    REAL_T **A_ptr_value = (REAL_T **) A->ptr_value;
    REAL_T **B_ptr_value = (REAL_T **) B->ptr_value;

    /* Copy nonzero-block counts and column-block indices into B. */
    memcpy(B->nnzb, A->nnzb, sizeof(int) * A->NB);
    int *A_indexb = A->indexb;
    int *B_indexb = B->indexb;
    memcpy(B_indexb, A_indexb, NB * MB * sizeof(int));

#pragma omp parallel for
    for (int ib = 0; ib < NB; ib++)
    {
        for (int jp = 0; jp < A->nnzb[ib]; jp++)
        {
            int ind = ROWMAJOR(ib, jp, NB, MB);
            assert(A_ptr_value[ind] != NULL);
            int jb = B_indexb[ind];
            int nelements = A->bsize[ib] * A->bsize[jb];
            /* Unlike the _new variant, B may already own storage for
             * this block; allocate only on first use. */
            if (B_ptr_value[ind] == NULL)
                B_ptr_value[ind] =
                    TYPED_FUNC(bml_allocate_block_ellblock) (B, ib,
                                                             nelements);
            assert(B_ptr_value[ind] != NULL);
            memcpy(B_ptr_value[ind], A_ptr_value[ind],
                   nelements * sizeof(REAL_T));
        }
    }
}

/** Reorder an ellblock matrix.
 *
 * \ingroup copy_group
 *
 * \param A The matrix to be reordered
 * \param perm The permutation vector
 */
void TYPED_FUNC(
    bml_reorder_ellblock) (
    bml_matrix_ellblock_t * A,
    int *perm)
{
    int NB = A->NB;
    int MB = A->MB;
    int *A_indexb = A->indexb;
    int *A_nnzb = A->nnzb;
    REAL_T **A_ptr_value = (REAL_T **) A->ptr_value;
    int *A_bsize = A->bsize;

    /* Work from an unpermuted snapshot of A; A is overwritten in
     * place below and the snapshot is freed at the end. */
    bml_matrix_ellblock_t *B = bml_copy_new(A);
    int *B_indexb = B->indexb;
    int *B_nnzb = B->nnzb;
    REAL_T **B_ptr_value = (REAL_T **) B->ptr_value;
    int *B_bsize = B->bsize;

    /* NOTE(review): block rows are moved ib -> perm[ib] below, but the
     * block sizes are gathered as A_bsize[i] = B_bsize[perm[i]]
     * (i.e. perm^-1 relative to the row move). Confirm whether perm
     * is expected to be an involution or whether this is inverted. */
    for (int i = 0; i < NB; i++)
    {
        A_bsize[i] = B_bsize[perm[i]];
    }

    // Reorder rows - need to copy
    for (int ib = 0; ib < NB; ib++)
    {
        memcpy(&A_indexb[ROWMAJOR(perm[ib], 0, NB, MB)],
               &B_indexb[ROWMAJOR(ib, 0, NB, MB)], MB * sizeof(int));
        /* Total number of columns of data stored in block row ib. */
        int count = 0;
        for (int jp = 0; jp < MB; jp++)
        {
            int ind = ROWMAJOR(ib, jp, NB, MB);
            int jb = B_indexb[ind];
            count += B_bsize[jb];
        }
        /* NOTE(review): this assumes all blocks of a row are stored
         * contiguously behind the first block's pointer — verify
         * against the allocator's actual layout. */
        memcpy(A_ptr_value[ROWMAJOR(perm[ib], 0, NB, MB)],
               B_ptr_value[ROWMAJOR(ib, 0, NB, MB)],
               B_bsize[ib] * count * sizeof(REAL_T));
        A_nnzb[perm[ib]] = B_nnzb[ib];
    }
    bml_deallocate_ellblock(B);

    // Reorder elements in each row - just change index
    for (int ib = 0; ib < NB; ib++)
    {
        for (int jp = 0; jp < A_nnzb[ib]; jp++)
        {
            A_indexb[ROWMAJOR(ib, jp, NB, MB)] =
                perm[A_indexb[ROWMAJOR(ib, jp, NB, MB)]];
        }
    }
}
GB_unaryop__ainv_uint8_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint8_uint64
// op(A') function:  GB_tran__ainv_uint8_uint64

// C type:   uint8_t
// A type:   uint64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse.  For this unsigned type the result
// wraps modulo 2^8, which is well-defined unsigned arithmetic in C.
#define GB_OP(z, x) \
    z = -x ;

// casting: declares a local variable z of the C type, initialized from x;
// GB_CAST_OP below relies on the name it introduces.
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint8_uint64
(
    uint8_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise apply: each entry is independent, so the loop is
    // embarrassingly parallel with a static schedule
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion; the
    // macros defined above parameterize it for this type/op pair
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cxx-pretty-print.c
/* Implementation of subroutines for the GNU C++ pretty-printer. Copyright (C) 2003-2020 Free Software Foundation, Inc. Contributed by Gabriel Dos Reis <gdr@integrable-solutions.net> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "cp-tree.h" #include "cxx-pretty-print.h" #include "tree-pretty-print.h" static void pp_cxx_unqualified_id (cxx_pretty_printer *, tree); static void pp_cxx_nested_name_specifier (cxx_pretty_printer *, tree); static void pp_cxx_qualified_id (cxx_pretty_printer *, tree); static void pp_cxx_template_argument_list (cxx_pretty_printer *, tree); static void pp_cxx_type_specifier_seq (cxx_pretty_printer *, tree); static void pp_cxx_ptr_operator (cxx_pretty_printer *, tree); static void pp_cxx_parameter_declaration_clause (cxx_pretty_printer *, tree); static void pp_cxx_template_parameter (cxx_pretty_printer *, tree); static void pp_cxx_cast_expression (cxx_pretty_printer *, tree); static void pp_cxx_typeid_expression (cxx_pretty_printer *, tree); static void pp_cxx_unary_left_fold_expression (cxx_pretty_printer *, tree); static void pp_cxx_unary_right_fold_expression (cxx_pretty_printer *, tree); static void pp_cxx_binary_fold_expression (cxx_pretty_printer *, tree); static void pp_cxx_concept_definition (cxx_pretty_printer *, tree); static inline void pp_cxx_nonconsecutive_character (cxx_pretty_printer *pp, int c) { 
const char *p = pp_last_position_in_text (pp); if (p != NULL && *p == c) pp_cxx_whitespace (pp); pp_character (pp, c); pp->padding = pp_none; } #define pp_cxx_expression_list(PP, T) \ pp_c_expression_list (PP, T) #define pp_cxx_space_for_pointer_operator(PP, T) \ pp_c_space_for_pointer_operator (PP, T) #define pp_cxx_init_declarator(PP, T) \ pp_c_init_declarator (PP, T) #define pp_cxx_call_argument_list(PP, T) \ pp_c_call_argument_list (PP, T) void pp_cxx_colon_colon (cxx_pretty_printer *pp) { pp_colon_colon (pp); pp->padding = pp_none; } void pp_cxx_begin_template_argument_list (cxx_pretty_printer *pp) { pp_cxx_nonconsecutive_character (pp, '<'); } void pp_cxx_end_template_argument_list (cxx_pretty_printer *pp) { pp_cxx_nonconsecutive_character (pp, '>'); } void pp_cxx_separate_with (cxx_pretty_printer *pp, int c) { pp_separate_with (pp, c); pp->padding = pp_none; } /* Expressions. */ /* conversion-function-id: operator conversion-type-id conversion-type-id: type-specifier-seq conversion-declarator(opt) conversion-declarator: ptr-operator conversion-declarator(opt) */ static inline void pp_cxx_conversion_function_id (cxx_pretty_printer *pp, tree t) { pp_cxx_ws_string (pp, "operator"); pp_cxx_type_specifier_seq (pp, TREE_TYPE (t)); } static inline void pp_cxx_template_id (cxx_pretty_printer *pp, tree t) { pp_cxx_unqualified_id (pp, TREE_OPERAND (t, 0)); pp_cxx_begin_template_argument_list (pp); pp_cxx_template_argument_list (pp, TREE_OPERAND (t, 1)); pp_cxx_end_template_argument_list (pp); } /* Prints the unqualified part of the id-expression T. 
unqualified-id: identifier operator-function-id conversion-function-id ~ class-name template-id */ static void pp_cxx_unqualified_id (cxx_pretty_printer *pp, tree t) { enum tree_code code = TREE_CODE (t); switch (code) { case RESULT_DECL: pp->translate_string ("<return-value>"); break; case OVERLOAD: t = OVL_FIRST (t); /* FALLTHRU */ case VAR_DECL: case PARM_DECL: case CONST_DECL: case TYPE_DECL: case FUNCTION_DECL: case NAMESPACE_DECL: case FIELD_DECL: case LABEL_DECL: case USING_DECL: case TEMPLATE_DECL: t = DECL_NAME (t); /* FALLTHRU */ case IDENTIFIER_NODE: if (t == NULL) pp->translate_string ("<unnamed>"); else if (IDENTIFIER_CONV_OP_P (t)) pp_cxx_conversion_function_id (pp, t); else pp_cxx_tree_identifier (pp, t); break; case TEMPLATE_ID_EXPR: pp_cxx_template_id (pp, t); break; case BASELINK: pp_cxx_unqualified_id (pp, BASELINK_FUNCTIONS (t)); break; case RECORD_TYPE: case UNION_TYPE: case ENUMERAL_TYPE: case TYPENAME_TYPE: case UNBOUND_CLASS_TEMPLATE: pp_cxx_unqualified_id (pp, TYPE_NAME (t)); if (tree ti = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (t)) { pp_cxx_begin_template_argument_list (pp); tree args = INNERMOST_TEMPLATE_ARGS (TI_ARGS (ti)); pp_cxx_template_argument_list (pp, args); pp_cxx_end_template_argument_list (pp); } break; case BIT_NOT_EXPR: pp_cxx_complement (pp); pp_cxx_unqualified_id (pp, TREE_OPERAND (t, 0)); break; case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: if (template_placeholder_p (t)) { t = TREE_TYPE (CLASS_PLACEHOLDER_TEMPLATE (t)); pp_cxx_unqualified_id (pp, TYPE_IDENTIFIER (t)); pp_string (pp, "<...auto...>"); } else if (TYPE_IDENTIFIER (t)) pp_cxx_unqualified_id (pp, TYPE_IDENTIFIER (t)); else pp_cxx_canonical_template_parameter (pp, t); break; case TEMPLATE_PARM_INDEX: pp_cxx_unqualified_id (pp, TEMPLATE_PARM_DECL (t)); break; case BOUND_TEMPLATE_TEMPLATE_PARM: pp_cxx_cv_qualifier_seq (pp, t); pp_cxx_unqualified_id (pp, TYPE_IDENTIFIER (t)); pp_cxx_begin_template_argument_list (pp); pp_cxx_template_argument_list (pp, TYPE_TI_ARGS 
(t)); pp_cxx_end_template_argument_list (pp); break; default: pp_unsupported_tree (pp, t); break; } } /* Pretty-print out the token sequence ":: template" in template codes where it is needed to "inline declare" the (following) member as a template. This situation arises when SCOPE of T is dependent on template parameters. */ static inline void pp_cxx_template_keyword_if_needed (cxx_pretty_printer *pp, tree scope, tree t) { if (TREE_CODE (t) == TEMPLATE_ID_EXPR && TYPE_P (scope) && dependent_type_p (scope)) pp_cxx_ws_string (pp, "template"); } /* nested-name-specifier: class-or-namespace-name :: nested-name-specifier(opt) class-or-namespace-name :: template nested-name-specifier */ static void pp_cxx_nested_name_specifier (cxx_pretty_printer *pp, tree t) { /* FIXME: When diagnosing references to concepts (especially as types?) we end up adding too many '::' to the name. This is partially due to the fact that pp->enclosing_namespace is null. */ if (t == global_namespace) { pp_cxx_colon_colon (pp); } else if (!SCOPE_FILE_SCOPE_P (t) && t != pp->enclosing_scope) { tree scope = get_containing_scope (t); pp_cxx_nested_name_specifier (pp, scope); pp_cxx_template_keyword_if_needed (pp, scope, t); pp_cxx_unqualified_id (pp, t); pp_cxx_colon_colon (pp); } } /* qualified-id: nested-name-specifier template(opt) unqualified-id */ static void pp_cxx_qualified_id (cxx_pretty_printer *pp, tree t) { switch (TREE_CODE (t)) { /* A pointer-to-member is always qualified. */ case PTRMEM_CST: pp_cxx_nested_name_specifier (pp, PTRMEM_CST_CLASS (t)); pp_cxx_unqualified_id (pp, PTRMEM_CST_MEMBER (t)); break; /* In Standard C++, functions cannot possibly be used as nested-name-specifiers. However, there are situations where is "makes sense" to output the surrounding function name for the purpose of emphasizing on the scope kind. Just printing the function name might not be sufficient as it may be overloaded; so, we decorate the function with its signature too. 
FIXME: This is probably the wrong pretty-printing for conversion functions and some function templates. */ case OVERLOAD: t = OVL_FIRST (t); /* FALLTHRU */ case FUNCTION_DECL: if (DECL_FUNCTION_MEMBER_P (t)) pp_cxx_nested_name_specifier (pp, DECL_CONTEXT (t)); pp_cxx_unqualified_id (pp, DECL_CONSTRUCTOR_P (t) ? DECL_CONTEXT (t) : t); pp_cxx_parameter_declaration_clause (pp, TREE_TYPE (t)); break; case OFFSET_REF: case SCOPE_REF: pp_cxx_nested_name_specifier (pp, TREE_OPERAND (t, 0)); pp_cxx_unqualified_id (pp, TREE_OPERAND (t, 1)); break; default: { tree scope = get_containing_scope (t); if (scope != pp->enclosing_scope) { pp_cxx_nested_name_specifier (pp, scope); pp_cxx_template_keyword_if_needed (pp, scope, t); } pp_cxx_unqualified_id (pp, t); } break; } } /* Given a value e of ENUMERAL_TYPE: Print out the first ENUMERATOR id with value e, if one is found, (including nested names but excluding the enum name if unscoped) else print out the value as a C-style cast (type-id)value. */ static void pp_cxx_enumeration_constant (cxx_pretty_printer *pp, tree e) { tree type = TREE_TYPE (e); tree value = NULL_TREE; /* Find the name of this constant. */ if ((pp->flags & pp_c_flag_gnu_v3) == 0) for (value = TYPE_VALUES (type); value != NULL_TREE; value = TREE_CHAIN (value)) if (tree_int_cst_equal (DECL_INITIAL (TREE_VALUE (value)), e)) break; if (value != NULL_TREE) { if (!ENUM_IS_SCOPED (type)) type = get_containing_scope (type); pp_cxx_nested_name_specifier (pp, type); pp->id_expression (TREE_PURPOSE (value)); } else { /* Value must have been cast. 
*/ pp_c_type_cast (pp, type); pp_c_integer_constant (pp, e); } } void cxx_pretty_printer::constant (tree t) { switch (TREE_CODE (t)) { case STRING_CST: { const bool in_parens = PAREN_STRING_LITERAL_P (t); if (in_parens) pp_cxx_left_paren (this); c_pretty_printer::constant (t); if (in_parens) pp_cxx_right_paren (this); } break; case INTEGER_CST: if (NULLPTR_TYPE_P (TREE_TYPE (t))) { pp_string (this, "nullptr"); break; } else if (TREE_CODE (TREE_TYPE (t)) == ENUMERAL_TYPE) { pp_cxx_enumeration_constant (this, t); break; } /* fall through. */ default: c_pretty_printer::constant (t); break; } } /* id-expression: unqualified-id qualified-id */ void cxx_pretty_printer::id_expression (tree t) { if (TREE_CODE (t) == OVERLOAD) t = OVL_FIRST (t); if (DECL_P (t) && DECL_CONTEXT (t)) pp_cxx_qualified_id (this, t); else pp_cxx_unqualified_id (this, t); } /* user-defined literal: literal ud-suffix */ void pp_cxx_userdef_literal (cxx_pretty_printer *pp, tree t) { pp->constant (USERDEF_LITERAL_VALUE (t)); pp->id_expression (USERDEF_LITERAL_SUFFIX_ID (t)); } /* primary-expression: literal this :: identifier :: operator-function-id :: qualifier-id ( expression ) id-expression GNU Extensions: __builtin_va_arg ( assignment-expression , type-id ) __builtin_offsetof ( type-id, offsetof-expression ) __builtin_addressof ( expression ) __has_nothrow_assign ( type-id ) __has_nothrow_constructor ( type-id ) __has_nothrow_copy ( type-id ) __has_trivial_assign ( type-id ) __has_trivial_constructor ( type-id ) __has_trivial_copy ( type-id ) __has_unique_object_representations ( type-id ) __has_trivial_destructor ( type-id ) __has_virtual_destructor ( type-id ) __is_abstract ( type-id ) __is_base_of ( type-id , type-id ) __is_class ( type-id ) __is_empty ( type-id ) __is_enum ( type-id ) __is_literal_type ( type-id ) __is_pod ( type-id ) __is_polymorphic ( type-id ) __is_std_layout ( type-id ) __is_trivial ( type-id ) __is_union ( type-id ) */ void cxx_pretty_printer::primary_expression (tree t) 
{ switch (TREE_CODE (t)) { case VOID_CST: case INTEGER_CST: case REAL_CST: case COMPLEX_CST: case STRING_CST: constant (t); break; case USERDEF_LITERAL: pp_cxx_userdef_literal (this, t); break; case BASELINK: t = BASELINK_FUNCTIONS (t); /* FALLTHRU */ case VAR_DECL: case PARM_DECL: case FIELD_DECL: case FUNCTION_DECL: case OVERLOAD: case CONST_DECL: case TEMPLATE_DECL: id_expression (t); break; case RESULT_DECL: case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case TEMPLATE_PARM_INDEX: pp_cxx_unqualified_id (this, t); break; case STMT_EXPR: pp_cxx_left_paren (this); statement (STMT_EXPR_STMT (t)); pp_cxx_right_paren (this); break; case TRAIT_EXPR: pp_cxx_trait_expression (this, t); break; case VA_ARG_EXPR: pp_cxx_va_arg_expression (this, t); break; case OFFSETOF_EXPR: pp_cxx_offsetof_expression (this, t); break; case ADDRESSOF_EXPR: pp_cxx_addressof_expression (this, t); break; case REQUIRES_EXPR: pp_cxx_requires_expr (this, t); break; default: c_pretty_printer::primary_expression (t); break; } } /* postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( expression-list(opt) ) simple-type-specifier ( expression-list(opt) ) typename ::(opt) nested-name-specifier identifier ( expression-list(opt) ) typename ::(opt) nested-name-specifier template(opt) template-id ( expression-list(opt) ) postfix-expression . template(opt) ::(opt) id-expression postfix-expression -> template(opt) ::(opt) id-expression postfix-expression . 
pseudo-destructor-name postfix-expression -> pseudo-destructor-name postfix-expression ++ postfix-expression -- dynamic_cast < type-id > ( expression ) static_cast < type-id > ( expression ) reinterpret_cast < type-id > ( expression ) const_cast < type-id > ( expression ) typeid ( expression ) typeid ( type-id ) */ void cxx_pretty_printer::postfix_expression (tree t) { enum tree_code code = TREE_CODE (t); switch (code) { case AGGR_INIT_EXPR: case CALL_EXPR: { tree fun = cp_get_callee (t); tree saved_scope = enclosing_scope; bool skipfirst = false; tree arg; if (TREE_CODE (fun) == ADDR_EXPR) fun = TREE_OPERAND (fun, 0); /* In templates, where there is no way to tell whether a given call uses an actual member function. So the parser builds FUN as a COMPONENT_REF or a plain IDENTIFIER_NODE until instantiation time. */ if (TREE_CODE (fun) != FUNCTION_DECL) ; else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fun)) { tree object = (code == AGGR_INIT_EXPR ? (AGGR_INIT_VIA_CTOR_P (t) ? AGGR_INIT_EXPR_SLOT (t) : AGGR_INIT_EXPR_ARG (t, 0)) : CALL_EXPR_ARG (t, 0)); while (TREE_CODE (object) == NOP_EXPR) object = TREE_OPERAND (object, 0); if (TREE_CODE (object) == ADDR_EXPR) object = TREE_OPERAND (object, 0); if (!TYPE_PTR_P (TREE_TYPE (object))) { postfix_expression (object); pp_cxx_dot (this); } else { postfix_expression (object); pp_cxx_arrow (this); } skipfirst = true; enclosing_scope = strip_pointer_operator (TREE_TYPE (object)); } postfix_expression (fun); enclosing_scope = saved_scope; pp_cxx_left_paren (this); if (code == AGGR_INIT_EXPR) { aggr_init_expr_arg_iterator iter; FOR_EACH_AGGR_INIT_EXPR_ARG (arg, iter, t) { if (skipfirst) skipfirst = false; else { expression (arg); if (more_aggr_init_expr_args_p (&iter)) pp_cxx_separate_with (this, ','); } } } else { call_expr_arg_iterator iter; FOR_EACH_CALL_EXPR_ARG (arg, iter, t) { if (skipfirst) skipfirst = false; else { expression (arg); if (more_call_expr_args_p (&iter)) pp_cxx_separate_with (this, ','); } } } 
pp_cxx_right_paren (this); } if (code == AGGR_INIT_EXPR && AGGR_INIT_VIA_CTOR_P (t)) { pp_cxx_separate_with (this, ','); postfix_expression (AGGR_INIT_EXPR_SLOT (t)); } break; case BASELINK: case VAR_DECL: case PARM_DECL: case FIELD_DECL: case FUNCTION_DECL: case OVERLOAD: case CONST_DECL: case TEMPLATE_DECL: case RESULT_DECL: primary_expression (t); break; case DYNAMIC_CAST_EXPR: case STATIC_CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: if (code == DYNAMIC_CAST_EXPR) pp_cxx_ws_string (this, "dynamic_cast"); else if (code == STATIC_CAST_EXPR) pp_cxx_ws_string (this, "static_cast"); else if (code == REINTERPRET_CAST_EXPR) pp_cxx_ws_string (this, "reinterpret_cast"); else pp_cxx_ws_string (this, "const_cast"); pp_cxx_begin_template_argument_list (this); type_id (TREE_TYPE (t)); pp_cxx_end_template_argument_list (this); pp_left_paren (this); expression (TREE_OPERAND (t, 0)); pp_right_paren (this); break; case EMPTY_CLASS_EXPR: type_id (TREE_TYPE (t)); pp_left_paren (this); pp_right_paren (this); break; case TYPEID_EXPR: pp_cxx_typeid_expression (this, t); break; case PSEUDO_DTOR_EXPR: postfix_expression (TREE_OPERAND (t, 0)); pp_cxx_dot (this); if (TREE_OPERAND (t, 1)) { pp_cxx_qualified_id (this, TREE_OPERAND (t, 1)); pp_cxx_colon_colon (this); } pp_complement (this); pp_cxx_unqualified_id (this, TREE_OPERAND (t, 2)); break; case ARROW_EXPR: postfix_expression (TREE_OPERAND (t, 0)); pp_cxx_arrow (this); break; default: c_pretty_printer::postfix_expression (t); break; } } /* new-expression: ::(opt) new new-placement(opt) new-type-id new-initializer(opt) ::(opt) new new-placement(opt) ( type-id ) new-initializer(opt) new-placement: ( expression-list ) new-type-id: type-specifier-seq new-declarator(opt) new-declarator: ptr-operator new-declarator(opt) direct-new-declarator direct-new-declarator [ expression ] direct-new-declarator [ constant-expression ] new-initializer: ( expression-list(opt) ) */ static void pp_cxx_new_expression (cxx_pretty_printer 
*pp, tree t) { enum tree_code code = TREE_CODE (t); tree type = TREE_OPERAND (t, 1); tree init = TREE_OPERAND (t, 2); switch (code) { case NEW_EXPR: case VEC_NEW_EXPR: if (NEW_EXPR_USE_GLOBAL (t)) pp_cxx_colon_colon (pp); pp_cxx_ws_string (pp, "new"); if (TREE_OPERAND (t, 0)) { pp_cxx_call_argument_list (pp, TREE_OPERAND (t, 0)); pp_space (pp); } if (TREE_CODE (type) == ARRAY_REF) type = build_cplus_array_type (TREE_OPERAND (type, 0), build_index_type (fold_build2_loc (input_location, MINUS_EXPR, integer_type_node, TREE_OPERAND (type, 1), integer_one_node))); pp->type_id (type); if (init) { pp_left_paren (pp); if (TREE_CODE (init) == TREE_LIST) pp_c_expression_list (pp, init); else if (init == void_node) ; /* OK, empty initializer list. */ else pp->expression (init); pp_right_paren (pp); } break; default: pp_unsupported_tree (pp, t); } } /* delete-expression: ::(opt) delete cast-expression ::(opt) delete [ ] cast-expression */ static void pp_cxx_delete_expression (cxx_pretty_printer *pp, tree t) { enum tree_code code = TREE_CODE (t); switch (code) { case DELETE_EXPR: case VEC_DELETE_EXPR: if (DELETE_EXPR_USE_GLOBAL (t)) pp_cxx_colon_colon (pp); pp_cxx_ws_string (pp, "delete"); pp_space (pp); if (code == VEC_DELETE_EXPR || DELETE_EXPR_USE_VEC (t)) { pp_left_bracket (pp); pp_right_bracket (pp); pp_space (pp); } pp_c_cast_expression (pp, TREE_OPERAND (t, 0)); break; default: pp_unsupported_tree (pp, t); } } /* unary-expression: postfix-expression ++ cast-expression -- cast-expression unary-operator cast-expression sizeof unary-expression sizeof ( type-id ) sizeof ... ( identifier ) new-expression delete-expression unary-operator: one of * & + - ! 
GNU extensions: __alignof__ unary-expression __alignof__ ( type-id ) */ void cxx_pretty_printer::unary_expression (tree t) { enum tree_code code = TREE_CODE (t); switch (code) { case NEW_EXPR: case VEC_NEW_EXPR: pp_cxx_new_expression (this, t); break; case DELETE_EXPR: case VEC_DELETE_EXPR: pp_cxx_delete_expression (this, t); break; case SIZEOF_EXPR: if (PACK_EXPANSION_P (TREE_OPERAND (t, 0))) { pp_cxx_ws_string (this, "sizeof"); pp_cxx_ws_string (this, "..."); pp_cxx_whitespace (this); pp_cxx_left_paren (this); if (TYPE_P (TREE_OPERAND (t, 0))) type_id (TREE_OPERAND (t, 0)); else unary_expression (TREE_OPERAND (t, 0)); pp_cxx_right_paren (this); break; } /* Fall through */ case ALIGNOF_EXPR: pp_cxx_ws_string (this, code == SIZEOF_EXPR ? "sizeof" : "__alignof__"); pp_cxx_whitespace (this); if (TREE_CODE (t) == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (t)) { pp_cxx_left_paren (this); type_id (TREE_TYPE (TREE_OPERAND (t, 0))); pp_cxx_right_paren (this); } else if (TYPE_P (TREE_OPERAND (t, 0))) { pp_cxx_left_paren (this); type_id (TREE_OPERAND (t, 0)); pp_cxx_right_paren (this); } else unary_expression (TREE_OPERAND (t, 0)); break; case AT_ENCODE_EXPR: pp_cxx_ws_string (this, "@encode"); pp_cxx_whitespace (this); pp_cxx_left_paren (this); type_id (TREE_OPERAND (t, 0)); pp_cxx_right_paren (this); break; case NOEXCEPT_EXPR: pp_cxx_ws_string (this, "noexcept"); pp_cxx_whitespace (this); pp_cxx_left_paren (this); expression (TREE_OPERAND (t, 0)); pp_cxx_right_paren (this); break; case UNARY_PLUS_EXPR: pp_plus (this); pp_cxx_cast_expression (this, TREE_OPERAND (t, 0)); break; default: c_pretty_printer::unary_expression (t); break; } } /* cast-expression: unary-expression ( type-id ) cast-expression */ static void pp_cxx_cast_expression (cxx_pretty_printer *pp, tree t) { switch (TREE_CODE (t)) { case CAST_EXPR: case IMPLICIT_CONV_EXPR: pp->type_id (TREE_TYPE (t)); pp_cxx_call_argument_list (pp, TREE_OPERAND (t, 0)); break; default: pp_c_cast_expression (pp, t); break; } } /* 
pm-expression: cast-expression pm-expression .* cast-expression pm-expression ->* cast-expression */ static void pp_cxx_pm_expression (cxx_pretty_printer *pp, tree t) { switch (TREE_CODE (t)) { /* Handle unfortunate OFFSET_REF overloading here. */ case OFFSET_REF: if (TYPE_P (TREE_OPERAND (t, 0))) { pp_cxx_qualified_id (pp, t); break; } /* Fall through. */ case MEMBER_REF: case DOTSTAR_EXPR: pp_cxx_pm_expression (pp, TREE_OPERAND (t, 0)); if (TREE_CODE (t) == MEMBER_REF) pp_cxx_arrow (pp); else pp_cxx_dot (pp); pp_star(pp); pp_cxx_cast_expression (pp, TREE_OPERAND (t, 1)); break; default: pp_cxx_cast_expression (pp, t); break; } } /* multiplicative-expression: pm-expression multiplicative-expression * pm-expression multiplicative-expression / pm-expression multiplicative-expression % pm-expression */ void cxx_pretty_printer::multiplicative_expression (tree e) { enum tree_code code = TREE_CODE (e); switch (code) { case MULT_EXPR: case TRUNC_DIV_EXPR: case TRUNC_MOD_EXPR: case EXACT_DIV_EXPR: case RDIV_EXPR: multiplicative_expression (TREE_OPERAND (e, 0)); pp_space (this); if (code == MULT_EXPR) pp_star (this); else if (code != TRUNC_MOD_EXPR) pp_slash (this); else pp_modulo (this); pp_space (this); pp_cxx_pm_expression (this, TREE_OPERAND (e, 1)); break; default: pp_cxx_pm_expression (this, e); break; } } /* conditional-expression: logical-or-expression logical-or-expression ? expression : assignment-expression */ void cxx_pretty_printer::conditional_expression (tree e) { if (TREE_CODE (e) == COND_EXPR) { pp_c_logical_or_expression (this, TREE_OPERAND (e, 0)); pp_space (this); pp_question (this); pp_space (this); expression (TREE_OPERAND (e, 1)); pp_space (this); assignment_expression (TREE_OPERAND (e, 2)); } else pp_c_logical_or_expression (this, e); } /* Pretty-print a compound assignment operator token as indicated by T. 
*/ static void pp_cxx_assignment_operator (cxx_pretty_printer *pp, tree t) { const char *op; switch (TREE_CODE (t)) { case NOP_EXPR: op = "="; break; case PLUS_EXPR: op = "+="; break; case MINUS_EXPR: op = "-="; break; case TRUNC_DIV_EXPR: op = "/="; break; case TRUNC_MOD_EXPR: op = "%="; break; default: op = get_tree_code_name (TREE_CODE (t)); break; } pp_cxx_ws_string (pp, op); } /* assignment-expression: conditional-expression logical-or-expression assignment-operator assignment-expression throw-expression throw-expression: throw assignment-expression(opt) assignment-operator: one of = *= /= %= += -= >>= <<= &= ^= |= */ void cxx_pretty_printer::assignment_expression (tree e) { switch (TREE_CODE (e)) { case MODIFY_EXPR: case INIT_EXPR: pp_c_logical_or_expression (this, TREE_OPERAND (e, 0)); pp_space (this); pp_equal (this); pp_space (this); assignment_expression (TREE_OPERAND (e, 1)); break; case THROW_EXPR: pp_cxx_ws_string (this, "throw"); if (TREE_OPERAND (e, 0)) assignment_expression (TREE_OPERAND (e, 0)); break; case MODOP_EXPR: pp_c_logical_or_expression (this, TREE_OPERAND (e, 0)); pp_cxx_assignment_operator (this, TREE_OPERAND (e, 1)); assignment_expression (TREE_OPERAND (e, 2)); break; default: conditional_expression (e); break; } } void cxx_pretty_printer::expression (tree t) { switch (TREE_CODE (t)) { case STRING_CST: case VOID_CST: case INTEGER_CST: case REAL_CST: case COMPLEX_CST: constant (t); break; case USERDEF_LITERAL: pp_cxx_userdef_literal (this, t); break; case RESULT_DECL: pp_cxx_unqualified_id (this, t); break; #if 0 case OFFSET_REF: #endif case SCOPE_REF: case PTRMEM_CST: pp_cxx_qualified_id (this, t); break; case OVERLOAD: t = OVL_FIRST (t); /* FALLTHRU */ case VAR_DECL: case PARM_DECL: case FIELD_DECL: case CONST_DECL: case FUNCTION_DECL: case BASELINK: case TEMPLATE_DECL: case TEMPLATE_TYPE_PARM: case TEMPLATE_PARM_INDEX: case TEMPLATE_TEMPLATE_PARM: case STMT_EXPR: case REQUIRES_EXPR: primary_expression (t); break; case CALL_EXPR: case 
DYNAMIC_CAST_EXPR: case STATIC_CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: #if 0 case MEMBER_REF: #endif case EMPTY_CLASS_EXPR: case TYPEID_EXPR: case PSEUDO_DTOR_EXPR: case AGGR_INIT_EXPR: case ARROW_EXPR: postfix_expression (t); break; case NEW_EXPR: case VEC_NEW_EXPR: pp_cxx_new_expression (this, t); break; case DELETE_EXPR: case VEC_DELETE_EXPR: pp_cxx_delete_expression (this, t); break; case SIZEOF_EXPR: case ALIGNOF_EXPR: case NOEXCEPT_EXPR: case UNARY_PLUS_EXPR: unary_expression (t); break; case CAST_EXPR: case IMPLICIT_CONV_EXPR: pp_cxx_cast_expression (this, t); break; case OFFSET_REF: case MEMBER_REF: case DOTSTAR_EXPR: pp_cxx_pm_expression (this, t); break; case MULT_EXPR: case TRUNC_DIV_EXPR: case TRUNC_MOD_EXPR: case EXACT_DIV_EXPR: case RDIV_EXPR: multiplicative_expression (t); break; case COND_EXPR: conditional_expression (t); break; case MODIFY_EXPR: case INIT_EXPR: case THROW_EXPR: case MODOP_EXPR: assignment_expression (t); break; case NON_DEPENDENT_EXPR: case MUST_NOT_THROW_EXPR: expression (TREE_OPERAND (t, 0)); break; case EXPR_PACK_EXPANSION: expression (PACK_EXPANSION_PATTERN (t)); pp_cxx_ws_string (this, "..."); break; case UNARY_LEFT_FOLD_EXPR: pp_cxx_unary_left_fold_expression (this, t); break; case UNARY_RIGHT_FOLD_EXPR: pp_cxx_unary_right_fold_expression (this, t); break; case BINARY_LEFT_FOLD_EXPR: case BINARY_RIGHT_FOLD_EXPR: pp_cxx_binary_fold_expression (this, t); break; case TEMPLATE_ID_EXPR: pp_cxx_template_id (this, t); break; case NONTYPE_ARGUMENT_PACK: { tree args = ARGUMENT_PACK_ARGS (t); int i, len = TREE_VEC_LENGTH (args); pp_cxx_left_brace (this); for (i = 0; i < len; ++i) { if (i > 0) pp_cxx_separate_with (this, ','); expression (TREE_VEC_ELT (args, i)); } pp_cxx_right_brace (this); } break; case LAMBDA_EXPR: pp_cxx_ws_string (this, "<lambda>"); break; case TRAIT_EXPR: pp_cxx_trait_expression (this, t); break; case ATOMIC_CONSTR: case CHECK_CONSTR: case CONJ_CONSTR: case DISJ_CONSTR: pp_cxx_constraint 
(this, t); break; case PAREN_EXPR: pp_cxx_left_paren (this); expression (TREE_OPERAND (t, 0)); pp_cxx_right_paren (this); break; default: c_pretty_printer::expression (t); break; } } /* Declarations. */ /* function-specifier: inline virtual explicit */ void cxx_pretty_printer::function_specifier (tree t) { switch (TREE_CODE (t)) { case FUNCTION_DECL: if (DECL_VIRTUAL_P (t)) pp_cxx_ws_string (this, "virtual"); else if (DECL_CONSTRUCTOR_P (t) && DECL_NONCONVERTING_P (t)) pp_cxx_ws_string (this, "explicit"); else c_pretty_printer::function_specifier (t); default: break; } } /* decl-specifier-seq: decl-specifier-seq(opt) decl-specifier decl-specifier: storage-class-specifier type-specifier function-specifier friend typedef */ void cxx_pretty_printer::declaration_specifiers (tree t) { switch (TREE_CODE (t)) { case VAR_DECL: case PARM_DECL: case CONST_DECL: case FIELD_DECL: storage_class_specifier (t); declaration_specifiers (TREE_TYPE (t)); break; case TYPE_DECL: pp_cxx_ws_string (this, "typedef"); declaration_specifiers (TREE_TYPE (t)); break; case FUNCTION_DECL: /* Constructors don't have return types. And conversion functions do not have a type-specifier in their return types. 
*/ if (DECL_CONSTRUCTOR_P (t) || DECL_CONV_FN_P (t)) function_specifier (t); else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (t)) declaration_specifiers (TREE_TYPE (TREE_TYPE (t))); else c_pretty_printer::declaration_specifiers (t); break; default: c_pretty_printer::declaration_specifiers (t); break; } } /* simple-type-specifier: ::(opt) nested-name-specifier(opt) type-name ::(opt) nested-name-specifier(opt) template(opt) template-id decltype-specifier char wchar_t bool short int long signed unsigned float double void */ void cxx_pretty_printer::simple_type_specifier (tree t) { switch (TREE_CODE (t)) { case RECORD_TYPE: case UNION_TYPE: case ENUMERAL_TYPE: pp_cxx_qualified_id (this, t); break; case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case TEMPLATE_PARM_INDEX: case BOUND_TEMPLATE_TEMPLATE_PARM: pp_cxx_unqualified_id (this, t); if (tree c = PLACEHOLDER_TYPE_CONSTRAINTS (t)) pp_cxx_constrained_type_spec (this, c); break; case TYPENAME_TYPE: pp_cxx_ws_string (this, "typename"); pp_cxx_nested_name_specifier (this, TYPE_CONTEXT (t)); pp_cxx_unqualified_id (this, TYPENAME_TYPE_FULLNAME (t)); break; case DECLTYPE_TYPE: pp_cxx_ws_string (this, "decltype"); pp_cxx_left_paren (this); this->expression (DECLTYPE_TYPE_EXPR (t)); pp_cxx_right_paren (this); break; default: c_pretty_printer::simple_type_specifier (t); break; } } /* type-specifier-seq: type-specifier type-specifier-seq(opt) type-specifier: simple-type-specifier class-specifier enum-specifier elaborated-type-specifier cv-qualifier */ static void pp_cxx_type_specifier_seq (cxx_pretty_printer *pp, tree t) { switch (TREE_CODE (t)) { case TEMPLATE_DECL: case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case TYPE_DECL: case BOUND_TEMPLATE_TEMPLATE_PARM: case DECLTYPE_TYPE: pp_cxx_cv_qualifier_seq (pp, t); pp->simple_type_specifier (t); break; case METHOD_TYPE: pp_cxx_type_specifier_seq (pp, TREE_TYPE (t)); pp_cxx_space_for_pointer_operator (pp, TREE_TYPE (t)); pp_cxx_nested_name_specifier (pp, 
TYPE_METHOD_BASETYPE (t)); break; case RECORD_TYPE: if (TYPE_PTRMEMFUNC_P (t)) { tree pfm = TYPE_PTRMEMFUNC_FN_TYPE (t); pp->declaration_specifiers (TREE_TYPE (TREE_TYPE (pfm))); pp_cxx_whitespace (pp); pp_cxx_ptr_operator (pp, t); break; } /* fall through */ default: if (!(TREE_CODE (t) == FUNCTION_DECL && DECL_CONSTRUCTOR_P (t))) pp_c_specifier_qualifier_list (pp, t); } } /* ptr-operator: * cv-qualifier-seq(opt) & ::(opt) nested-name-specifier * cv-qualifier-seq(opt) */ static void pp_cxx_ptr_operator (cxx_pretty_printer *pp, tree t) { if (!TYPE_P (t) && TREE_CODE (t) != TYPE_DECL) t = TREE_TYPE (t); switch (TREE_CODE (t)) { case REFERENCE_TYPE: case POINTER_TYPE: if (TYPE_PTR_OR_PTRMEM_P (TREE_TYPE (t))) pp_cxx_ptr_operator (pp, TREE_TYPE (t)); pp_c_attributes_display (pp, TYPE_ATTRIBUTES (TREE_TYPE (t))); if (TYPE_PTR_P (t)) { pp_star (pp); pp_cxx_cv_qualifier_seq (pp, t); } else pp_ampersand (pp); break; case RECORD_TYPE: if (TYPE_PTRMEMFUNC_P (t)) { pp_cxx_left_paren (pp); pp_cxx_nested_name_specifier (pp, TYPE_PTRMEMFUNC_OBJECT_TYPE (t)); pp_star (pp); break; } /* FALLTHRU */ case OFFSET_TYPE: if (TYPE_PTRMEM_P (t)) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) pp_cxx_left_paren (pp); pp_cxx_nested_name_specifier (pp, TYPE_PTRMEM_CLASS_TYPE (t)); pp_star (pp); pp_cxx_cv_qualifier_seq (pp, t); break; } /* fall through. 
*/ default: pp_unsupported_tree (pp, t); break; } } static inline tree pp_cxx_implicit_parameter_type (tree mf) { return class_of_this_parm (TREE_TYPE (mf)); } /* parameter-declaration: decl-specifier-seq declarator decl-specifier-seq declarator = assignment-expression decl-specifier-seq abstract-declarator(opt) decl-specifier-seq abstract-declarator(opt) assignment-expression */ static inline void pp_cxx_parameter_declaration (cxx_pretty_printer *pp, tree t) { pp->declaration_specifiers (t); if (TYPE_P (t)) pp->abstract_declarator (t); else pp->declarator (t); } /* parameter-declaration-clause: parameter-declaration-list(opt) ...(opt) parameter-declaration-list , ... parameter-declaration-list: parameter-declaration parameter-declaration-list , parameter-declaration */ static void pp_cxx_parameter_declaration_clause (cxx_pretty_printer *pp, tree t) { tree args; tree types; bool abstract; // For a requires clause or the explicit printing of a parameter list // we expect T to be a chain of PARM_DECLs. Otherwise, the list of // args and types are taken from the function decl T. if (TREE_CODE (t) == PARM_DECL) { args = t; types = t; abstract = false; } else { bool type_p = TYPE_P (t); args = type_p ? NULL : FUNCTION_FIRST_USER_PARM (t); types = type_p ? TYPE_ARG_TYPES (t) : FUNCTION_FIRST_USER_PARMTYPE (t); abstract = args == NULL || pp->flags & pp_c_flag_abstract; } bool first = true; /* Skip artificial parameter for nonstatic member functions. */ if (TREE_CODE (t) == METHOD_TYPE) types = TREE_CHAIN (types); pp_cxx_left_paren (pp); for (; args; args = TREE_CHAIN (args), types = TREE_CHAIN (types)) { if (!first) pp_cxx_separate_with (pp, ','); first = false; pp_cxx_parameter_declaration (pp, abstract ? 
TREE_VALUE (types) : args); if (!abstract && pp->flags & pp_cxx_flag_default_argument) { pp_cxx_whitespace (pp); pp_equal (pp); pp_cxx_whitespace (pp); pp->assignment_expression (TREE_PURPOSE (types)); } } pp_cxx_right_paren (pp); } /* exception-specification: throw ( type-id-list(opt) ) type-id-list type-id type-id-list , type-id */ static void pp_cxx_exception_specification (cxx_pretty_printer *pp, tree t) { tree ex_spec = TYPE_RAISES_EXCEPTIONS (t); bool need_comma = false; if (ex_spec == NULL) return; if (TREE_PURPOSE (ex_spec)) { pp_cxx_ws_string (pp, "noexcept"); pp_cxx_whitespace (pp); pp_cxx_left_paren (pp); if (DEFERRED_NOEXCEPT_SPEC_P (ex_spec)) pp_cxx_ws_string (pp, "<uninstantiated>"); else pp->expression (TREE_PURPOSE (ex_spec)); pp_cxx_right_paren (pp); return; } pp_cxx_ws_string (pp, "throw"); pp_cxx_left_paren (pp); for (; ex_spec && TREE_VALUE (ex_spec); ex_spec = TREE_CHAIN (ex_spec)) { tree type = TREE_VALUE (ex_spec); tree argpack = NULL_TREE; int i, len = 1; if (ARGUMENT_PACK_P (type)) { argpack = ARGUMENT_PACK_ARGS (type); len = TREE_VEC_LENGTH (argpack); } for (i = 0; i < len; ++i) { if (argpack) type = TREE_VEC_ELT (argpack, i); if (need_comma) pp_cxx_separate_with (pp, ','); else need_comma = true; pp->type_id (type); } } pp_cxx_right_paren (pp); } /* direct-declarator: declarator-id direct-declarator ( parameter-declaration-clause ) cv-qualifier-seq(opt) exception-specification(opt) direct-declaration [ constant-expression(opt) ] ( declarator ) */ void cxx_pretty_printer::direct_declarator (tree t) { switch (TREE_CODE (t)) { case VAR_DECL: case PARM_DECL: case CONST_DECL: case FIELD_DECL: if (DECL_NAME (t)) { pp_cxx_space_for_pointer_operator (this, TREE_TYPE (t)); if ((TREE_CODE (t) == PARM_DECL && DECL_PACK_P (t)) || template_parameter_pack_p (t)) /* A function parameter pack or non-type template parameter pack. 
*/ pp_cxx_ws_string (this, "..."); id_expression (DECL_NAME (t)); } abstract_declarator (TREE_TYPE (t)); break; case FUNCTION_DECL: pp_cxx_space_for_pointer_operator (this, TREE_TYPE (TREE_TYPE (t))); expression (t); pp_cxx_parameter_declaration_clause (this, t); if (DECL_NONSTATIC_MEMBER_FUNCTION_P (t)) { padding = pp_before; pp_cxx_cv_qualifier_seq (this, pp_cxx_implicit_parameter_type (t)); } pp_cxx_exception_specification (this, TREE_TYPE (t)); break; case TYPENAME_TYPE: case TEMPLATE_DECL: case TEMPLATE_TYPE_PARM: case TEMPLATE_PARM_INDEX: case TEMPLATE_TEMPLATE_PARM: break; default: c_pretty_printer::direct_declarator (t); break; } } /* declarator: direct-declarator ptr-operator declarator */ void cxx_pretty_printer::declarator (tree t) { direct_declarator (t); // Print a requires clause. if (flag_concepts) if (tree ci = get_constraints (t)) if (tree reqs = CI_DECLARATOR_REQS (ci)) pp_cxx_requires_clause (this, reqs); } /* ctor-initializer: : mem-initializer-list mem-initializer-list: mem-initializer mem-initializer , mem-initializer-list mem-initializer: mem-initializer-id ( expression-list(opt) ) mem-initializer-id: ::(opt) nested-name-specifier(opt) class-name identifier */ static void pp_cxx_ctor_initializer (cxx_pretty_printer *pp, tree t) { t = TREE_OPERAND (t, 0); pp_cxx_whitespace (pp); pp_colon (pp); pp_cxx_whitespace (pp); for (; t; t = TREE_CHAIN (t)) { tree purpose = TREE_PURPOSE (t); bool is_pack = PACK_EXPANSION_P (purpose); if (is_pack) pp->primary_expression (PACK_EXPANSION_PATTERN (purpose)); else pp->primary_expression (purpose); pp_cxx_call_argument_list (pp, TREE_VALUE (t)); if (is_pack) pp_cxx_ws_string (pp, "..."); if (TREE_CHAIN (t)) pp_cxx_separate_with (pp, ','); } } /* function-definition: decl-specifier-seq(opt) declarator ctor-initializer(opt) function-body decl-specifier-seq(opt) declarator function-try-block */ static void pp_cxx_function_definition (cxx_pretty_printer *pp, tree t) { tree saved_scope = pp->enclosing_scope; 
pp->declaration_specifiers (t); pp->declarator (t); pp_needs_newline (pp) = true; pp->enclosing_scope = DECL_CONTEXT (t); if (DECL_SAVED_TREE (t)) pp->statement (DECL_SAVED_TREE (t)); else pp_cxx_semicolon (pp); pp_newline_and_flush (pp); pp->enclosing_scope = saved_scope; } /* abstract-declarator: ptr-operator abstract-declarator(opt) direct-abstract-declarator */ void cxx_pretty_printer::abstract_declarator (tree t) { if (TYPE_PTRMEM_P (t)) pp_cxx_right_paren (this); else if (INDIRECT_TYPE_P (t)) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE) pp_cxx_right_paren (this); t = TREE_TYPE (t); } direct_abstract_declarator (t); } /* direct-abstract-declarator: direct-abstract-declarator(opt) ( parameter-declaration-clause ) cv-qualifier-seq(opt) exception-specification(opt) direct-abstract-declarator(opt) [ constant-expression(opt) ] ( abstract-declarator ) */ void cxx_pretty_printer::direct_abstract_declarator (tree t) { switch (TREE_CODE (t)) { case REFERENCE_TYPE: abstract_declarator (t); break; case RECORD_TYPE: if (TYPE_PTRMEMFUNC_P (t)) direct_abstract_declarator (TYPE_PTRMEMFUNC_FN_TYPE (t)); break; case METHOD_TYPE: case FUNCTION_TYPE: pp_cxx_parameter_declaration_clause (this, t); direct_abstract_declarator (TREE_TYPE (t)); if (TREE_CODE (t) == METHOD_TYPE) { padding = pp_before; pp_cxx_cv_qualifier_seq (this, class_of_this_parm (t)); } pp_cxx_exception_specification (this, t); break; case TYPENAME_TYPE: case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: case UNBOUND_CLASS_TEMPLATE: case DECLTYPE_TYPE: break; default: c_pretty_printer::direct_abstract_declarator (t); break; } } /* type-id: type-specifier-seq abstract-declarator(opt) */ void cxx_pretty_printer::type_id (tree t) { pp_flags saved_flags = flags; flags |= pp_c_flag_abstract; switch (TREE_CODE (t)) { case TYPE_DECL: case UNION_TYPE: case RECORD_TYPE: case ENUMERAL_TYPE: case TYPENAME_TYPE: case 
BOUND_TEMPLATE_TEMPLATE_PARM: case UNBOUND_CLASS_TEMPLATE: case TEMPLATE_TEMPLATE_PARM: case TEMPLATE_TYPE_PARM: case TEMPLATE_PARM_INDEX: case TEMPLATE_DECL: case TYPEOF_TYPE: case UNDERLYING_TYPE: case DECLTYPE_TYPE: case TEMPLATE_ID_EXPR: pp_cxx_type_specifier_seq (this, t); break; case TYPE_PACK_EXPANSION: type_id (PACK_EXPANSION_PATTERN (t)); pp_cxx_ws_string (this, "..."); break; case TYPE_ARGUMENT_PACK: { tree args = ARGUMENT_PACK_ARGS (t); int len = TREE_VEC_LENGTH (args); pp_cxx_left_brace (this); for (int i = 0; i < len; ++i) { if (i > 0) pp_cxx_separate_with (this, ','); type_id (TREE_VEC_ELT (args, i)); } pp_cxx_right_brace (this); } break; default: c_pretty_printer::type_id (t); break; } flags = saved_flags; } /* template-argument-list: template-argument ...(opt) template-argument-list, template-argument ...(opt) template-argument: assignment-expression type-id template-name */ static void pp_cxx_template_argument_list (cxx_pretty_printer *pp, tree t) { int i; bool need_comma = false; if (t == NULL) return; for (i = 0; i < TREE_VEC_LENGTH (t); ++i) { tree arg = TREE_VEC_ELT (t, i); tree argpack = NULL_TREE; int idx, len = 1; if (ARGUMENT_PACK_P (arg)) { argpack = ARGUMENT_PACK_ARGS (arg); len = TREE_VEC_LENGTH (argpack); } for (idx = 0; idx < len; idx++) { if (argpack) arg = TREE_VEC_ELT (argpack, idx); if (need_comma) pp_cxx_separate_with (pp, ','); else need_comma = true; if (TYPE_P (arg) || (TREE_CODE (arg) == TEMPLATE_DECL && TYPE_P (DECL_TEMPLATE_RESULT (arg)))) pp->type_id (arg); else pp->expression (arg); } } } static void pp_cxx_exception_declaration (cxx_pretty_printer *pp, tree t) { t = DECL_EXPR_DECL (t); pp_cxx_type_specifier_seq (pp, t); if (TYPE_P (t)) pp->abstract_declarator (t); else pp->declarator (t); } /* Statements. 
*/ void cxx_pretty_printer::statement (tree t) { switch (TREE_CODE (t)) { case CTOR_INITIALIZER: pp_cxx_ctor_initializer (this, t); break; case USING_STMT: pp_cxx_ws_string (this, "using"); pp_cxx_ws_string (this, "namespace"); if (DECL_CONTEXT (t)) pp_cxx_nested_name_specifier (this, DECL_CONTEXT (t)); pp_cxx_qualified_id (this, USING_STMT_NAMESPACE (t)); break; case USING_DECL: pp_cxx_ws_string (this, "using"); pp_cxx_nested_name_specifier (this, USING_DECL_SCOPE (t)); pp_cxx_unqualified_id (this, DECL_NAME (t)); break; case EH_SPEC_BLOCK: break; /* try-block: try compound-statement handler-seq */ case TRY_BLOCK: pp_maybe_newline_and_indent (this, 0); pp_cxx_ws_string (this, "try"); pp_newline_and_indent (this, 3); statement (TRY_STMTS (t)); pp_newline_and_indent (this, -3); if (CLEANUP_P (t)) ; else statement (TRY_HANDLERS (t)); break; /* handler-seq: handler handler-seq(opt) handler: catch ( exception-declaration ) compound-statement exception-declaration: type-specifier-seq declarator type-specifier-seq abstract-declarator ... 
*/ case HANDLER: pp_cxx_ws_string (this, "catch"); pp_cxx_left_paren (this); pp_cxx_exception_declaration (this, HANDLER_PARMS (t)); pp_cxx_right_paren (this); pp_indentation (this) += 3; pp_needs_newline (this) = true; statement (HANDLER_BODY (t)); pp_indentation (this) -= 3; pp_needs_newline (this) = true; break; /* selection-statement: if ( expression ) statement if ( expression ) statement else statement */ case IF_STMT: pp_cxx_ws_string (this, "if"); pp_cxx_whitespace (this); pp_cxx_left_paren (this); expression (IF_COND (t)); pp_cxx_right_paren (this); pp_newline_and_indent (this, 2); statement (THEN_CLAUSE (t)); pp_newline_and_indent (this, -2); if (ELSE_CLAUSE (t)) { tree else_clause = ELSE_CLAUSE (t); pp_cxx_ws_string (this, "else"); if (TREE_CODE (else_clause) == IF_STMT) pp_cxx_whitespace (this); else pp_newline_and_indent (this, 2); statement (else_clause); if (TREE_CODE (else_clause) != IF_STMT) pp_newline_and_indent (this, -2); } break; case SWITCH_STMT: pp_cxx_ws_string (this, "switch"); pp_space (this); pp_cxx_left_paren (this); expression (SWITCH_STMT_COND (t)); pp_cxx_right_paren (this); pp_indentation (this) += 3; pp_needs_newline (this) = true; statement (SWITCH_STMT_BODY (t)); pp_newline_and_indent (this, -3); break; /* iteration-statement: while ( expression ) statement do statement while ( expression ) ; for ( expression(opt) ; expression(opt) ; expression(opt) ) statement for ( declaration expression(opt) ; expression(opt) ) statement */ case WHILE_STMT: pp_cxx_ws_string (this, "while"); pp_space (this); pp_cxx_left_paren (this); expression (WHILE_COND (t)); pp_cxx_right_paren (this); pp_newline_and_indent (this, 3); statement (WHILE_BODY (t)); pp_indentation (this) -= 3; pp_needs_newline (this) = true; break; case DO_STMT: pp_cxx_ws_string (this, "do"); pp_newline_and_indent (this, 3); statement (DO_BODY (t)); pp_newline_and_indent (this, -3); pp_cxx_ws_string (this, "while"); pp_space (this); pp_cxx_left_paren (this); expression (DO_COND 
(t)); pp_cxx_right_paren (this); pp_cxx_semicolon (this); pp_needs_newline (this) = true; break; case FOR_STMT: pp_cxx_ws_string (this, "for"); pp_space (this); pp_cxx_left_paren (this); if (FOR_INIT_STMT (t)) statement (FOR_INIT_STMT (t)); else pp_cxx_semicolon (this); pp_needs_newline (this) = false; pp_cxx_whitespace (this); if (FOR_COND (t)) expression (FOR_COND (t)); pp_cxx_semicolon (this); pp_needs_newline (this) = false; pp_cxx_whitespace (this); if (FOR_EXPR (t)) expression (FOR_EXPR (t)); pp_cxx_right_paren (this); pp_newline_and_indent (this, 3); statement (FOR_BODY (t)); pp_indentation (this) -= 3; pp_needs_newline (this) = true; break; case RANGE_FOR_STMT: pp_cxx_ws_string (this, "for"); pp_space (this); pp_cxx_left_paren (this); if (RANGE_FOR_INIT_STMT (t)) { statement (RANGE_FOR_INIT_STMT (t)); pp_needs_newline (this) = false; pp_cxx_whitespace (this); } statement (RANGE_FOR_DECL (t)); pp_space (this); pp_needs_newline (this) = false; pp_colon (this); pp_space (this); statement (RANGE_FOR_EXPR (t)); pp_cxx_right_paren (this); pp_newline_and_indent (this, 3); statement (FOR_BODY (t)); pp_indentation (this) -= 3; pp_needs_newline (this) = true; break; /* jump-statement: goto identifier; continue ; return expression(opt) ; */ case BREAK_STMT: case CONTINUE_STMT: pp_string (this, TREE_CODE (t) == BREAK_STMT ? "break" : "continue"); pp_cxx_semicolon (this); pp_needs_newline (this) = true; break; /* expression-statement: expression(opt) ; */ case EXPR_STMT: expression (EXPR_STMT_EXPR (t)); pp_cxx_semicolon (this); pp_needs_newline (this) = true; break; case CLEANUP_STMT: pp_cxx_ws_string (this, "try"); pp_newline_and_indent (this, 2); statement (CLEANUP_BODY (t)); pp_newline_and_indent (this, -2); pp_cxx_ws_string (this, CLEANUP_EH_ONLY (t) ? 
"catch" : "finally"); pp_newline_and_indent (this, 2); statement (CLEANUP_EXPR (t)); pp_newline_and_indent (this, -2); break; case STATIC_ASSERT: declaration (t); break; case OMP_DEPOBJ: pp_cxx_ws_string (this, "#pragma omp depobj"); pp_space (this); pp_cxx_left_paren (this); expression (OMP_DEPOBJ_DEPOBJ (t)); pp_cxx_right_paren (this); if (OMP_DEPOBJ_CLAUSES (t) && OMP_DEPOBJ_CLAUSES (t) != error_mark_node) { if (TREE_CODE (OMP_DEPOBJ_CLAUSES (t)) == OMP_CLAUSE) dump_omp_clauses (this, OMP_DEPOBJ_CLAUSES (t), pp_indentation (this), TDF_NONE); else switch (tree_to_uhwi (OMP_DEPOBJ_CLAUSES (t))) { case OMP_CLAUSE_DEPEND_IN: pp_cxx_ws_string (this, " update(in)"); break; case OMP_CLAUSE_DEPEND_INOUT: pp_cxx_ws_string (this, " update(inout)"); break; case OMP_CLAUSE_DEPEND_OUT: pp_cxx_ws_string (this, " update(out)"); break; case OMP_CLAUSE_DEPEND_MUTEXINOUTSET: pp_cxx_ws_string (this, " update(mutexinoutset)"); break; case OMP_CLAUSE_DEPEND_LAST: pp_cxx_ws_string (this, " destroy"); break; default: break; } } pp_needs_newline (this) = true; break; default: c_pretty_printer::statement (t); break; } } /* original-namespace-definition: namespace identifier { namespace-body } As an edge case, we also handle unnamed namespace definition here. */ static void pp_cxx_original_namespace_definition (cxx_pretty_printer *pp, tree t) { pp_cxx_ws_string (pp, "namespace"); if (DECL_CONTEXT (t)) pp_cxx_nested_name_specifier (pp, DECL_CONTEXT (t)); if (DECL_NAME (t)) pp_cxx_unqualified_id (pp, t); pp_cxx_whitespace (pp); pp_cxx_left_brace (pp); /* We do not print the namespace-body. 
*/
  pp_cxx_whitespace (pp);
  pp_cxx_right_brace (pp);
}

/* namespace-alias:
     identifier

   namespace-alias-definition:
     namespace identifier = qualified-namespace-specifier ;

   qualified-namespace-specifier:
     ::(opt) nested-name-specifier(opt) namespace-name  */

static void
pp_cxx_namespace_alias_definition (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "namespace");
  /* Qualify the alias name with its enclosing scope, if any.  */
  if (DECL_CONTEXT (t))
    pp_cxx_nested_name_specifier (pp, DECL_CONTEXT (t));
  pp_cxx_unqualified_id (pp, t);
  pp_cxx_whitespace (pp);
  pp_equal (pp);
  pp_cxx_whitespace (pp);
  /* The aliased namespace may itself live in a nested scope; print its
     qualification before the qualified-id.  */
  if (DECL_CONTEXT (DECL_NAMESPACE_ALIAS (t)))
    pp_cxx_nested_name_specifier (pp,
                                  DECL_CONTEXT (DECL_NAMESPACE_ALIAS (t)));
  pp_cxx_qualified_id (pp, DECL_NAMESPACE_ALIAS (t));
  pp_cxx_semicolon (pp);
}

/* simple-declaration:
      decl-specifier-seq(opt) init-declarator-list(opt)  */

static void
pp_cxx_simple_declaration (cxx_pretty_printer *pp, tree t)
{
  pp->declaration_specifiers (t);
  pp_cxx_init_declarator (pp, t);
  pp_cxx_semicolon (pp);
  pp_needs_newline (pp) = true;
}

/* template-parameter-list:
      template-parameter
      template-parameter-list , template-parameter  */

static inline void
pp_cxx_template_parameter_list (cxx_pretty_printer *pp, tree t)
{
  const int n = TREE_VEC_LENGTH (t);
  int i;
  for (i = 0; i < n; ++i)
    {
      /* Comma-separate all parameters after the first.  */
      if (i)
        pp_cxx_separate_with (pp, ',');
      pp_cxx_template_parameter (pp, TREE_VEC_ELT (t, i));
    }
}

/* template-parameter:
      type-parameter
      parameter-declaration

   type-parameter:
     class ...(opt) identifier(opt)
     class identifier(opt) = type-id
     typename identifier(opt)
     typename ...(opt) identifier(opt) = type-id
     template < template-parameter-list > class ...(opt) identifier(opt)
     template < template-parameter-list > class identifier(opt) = template-name

   T is a TREE_LIST entry whose TREE_VALUE is the parameter declaration.  */

static void
pp_cxx_template_parameter (cxx_pretty_printer *pp, tree t)
{
  tree parameter =  TREE_VALUE (t);
  switch (TREE_CODE (parameter))
    {
    case TYPE_DECL:
      pp_cxx_ws_string (pp, "class");
      /* A type parameter pack gets the "..." ellipsis after "class".  */
      if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (t)))
        pp_cxx_ws_string (pp, "...");
      if (DECL_NAME (parameter))
        pp_cxx_tree_identifier (pp, DECL_NAME (parameter));
      /* FIXME: Check if we should print also default argument.  */
      break;
    case PARM_DECL:
      /* Non-type template parameter: print as a parameter-declaration.  */
      pp_cxx_parameter_declaration (pp, parameter);
      break;
    case TEMPLATE_DECL:
      break;
    default:
      pp_unsupported_tree (pp, t);
      break;
    }
}

/* Pretty-print a template parameter in the canonical form
   "template-parameter-<level>-<position in parameter list>".  */

void
pp_cxx_canonical_template_parameter (cxx_pretty_printer *pp, tree parm)
{
  const enum tree_code code = TREE_CODE (parm);

  /* Brings type template parameters to the canonical forms.  */
  if (code == TEMPLATE_TYPE_PARM || code == TEMPLATE_TEMPLATE_PARM
      || code == BOUND_TEMPLATE_TEMPLATE_PARM)
    parm = TEMPLATE_TYPE_PARM_INDEX (parm);

  pp_cxx_begin_template_argument_list (pp);
  pp->translate_string ("template-parameter-");
  pp_wide_integer (pp, TEMPLATE_PARM_LEVEL (parm));
  pp_minus (pp);
  /* Position is printed 1-based.  */
  pp_wide_integer (pp, TEMPLATE_PARM_IDX (parm) + 1);
  pp_cxx_end_template_argument_list (pp);
}

/* Print a constrained-type-specifier.
*/ void pp_cxx_constrained_type_spec (cxx_pretty_printer *pp, tree c) { pp_cxx_whitespace (pp); pp_cxx_left_bracket (pp); pp->translate_string ("requires"); pp_cxx_whitespace (pp); if (c == error_mark_node) { pp_cxx_ws_string(pp, "<unsatisfied-type-constraint>"); return; } tree t, a; placeholder_extract_concept_and_args (c, t, a); pp->id_expression (t); pp_cxx_begin_template_argument_list (pp); pp_cxx_ws_string (pp, "<placeholder>"); pp_cxx_separate_with (pp, ','); tree args = make_tree_vec (TREE_VEC_LENGTH (a) - 1); for (int i = 0; i < TREE_VEC_LENGTH (a) - 1; ++i) TREE_VEC_ELT (args, i) = TREE_VEC_ELT (a, i + 1); pp_cxx_template_argument_list (pp, args); ggc_free (args); pp_cxx_end_template_argument_list (pp); pp_cxx_right_bracket (pp); } /* template-declaration: export(opt) template < template-parameter-list > declaration Concept extensions: template-declaration: export(opt) template < template-parameter-list > requires-clause(opt) declaration */ static void pp_cxx_template_declaration (cxx_pretty_printer *pp, tree t) { tree tmpl = most_general_template (t); tree level; pp_maybe_newline_and_indent (pp, 0); for (level = DECL_TEMPLATE_PARMS (tmpl); level; level = TREE_CHAIN (level)) { pp_cxx_ws_string (pp, "template"); pp_cxx_begin_template_argument_list (pp); pp_cxx_template_parameter_list (pp, TREE_VALUE (level)); pp_cxx_end_template_argument_list (pp); pp_newline_and_indent (pp, 3); } if (flag_concepts) if (tree ci = get_constraints (t)) if (tree reqs = CI_TEMPLATE_REQS (ci)) { pp_cxx_requires_clause (pp, reqs); pp_newline_and_indent (pp, 6); } if (TREE_CODE (t) == FUNCTION_DECL && DECL_SAVED_TREE (t)) pp_cxx_function_definition (pp, t); else if (TREE_CODE (t) == CONCEPT_DECL) pp_cxx_concept_definition (pp, t); else pp_cxx_simple_declaration (pp, t); } static void pp_cxx_explicit_specialization (cxx_pretty_printer *pp, tree t) { pp_unsupported_tree (pp, t); } static void pp_cxx_explicit_instantiation (cxx_pretty_printer *pp, tree t) { pp_unsupported_tree (pp, 
t); } static void pp_cxx_concept_definition (cxx_pretty_printer *pp, tree t) { pp_cxx_unqualified_id (pp, DECL_NAME (t)); pp_cxx_whitespace (pp); pp_cxx_ws_string (pp, "="); pp_cxx_whitespace (pp); pp->expression (DECL_INITIAL (t)); pp_cxx_semicolon (pp); } /* declaration: block-declaration function-definition template-declaration explicit-instantiation explicit-specialization linkage-specification namespace-definition block-declaration: simple-declaration asm-definition namespace-alias-definition using-declaration using-directive static_assert-declaration */ void cxx_pretty_printer::declaration (tree t) { if (TREE_CODE (t) == STATIC_ASSERT) { pp_cxx_ws_string (this, "static_assert"); pp_cxx_left_paren (this); expression (STATIC_ASSERT_CONDITION (t)); pp_cxx_separate_with (this, ','); expression (STATIC_ASSERT_MESSAGE (t)); pp_cxx_right_paren (this); } else if (!DECL_LANG_SPECIFIC (t)) pp_cxx_simple_declaration (this, t); else if (DECL_USE_TEMPLATE (t)) switch (DECL_USE_TEMPLATE (t)) { case 1: pp_cxx_template_declaration (this, t); break; case 2: pp_cxx_explicit_specialization (this, t); break; case 3: pp_cxx_explicit_instantiation (this, t); break; default: break; } else switch (TREE_CODE (t)) { case VAR_DECL: case TYPE_DECL: pp_cxx_simple_declaration (this, t); break; case FUNCTION_DECL: if (DECL_SAVED_TREE (t)) pp_cxx_function_definition (this, t); else pp_cxx_simple_declaration (this, t); break; case NAMESPACE_DECL: if (DECL_NAMESPACE_ALIAS (t)) pp_cxx_namespace_alias_definition (this, t); else pp_cxx_original_namespace_definition (this, t); break; default: pp_unsupported_tree (this, t); break; } } static void pp_cxx_typeid_expression (cxx_pretty_printer *pp, tree t) { t = TREE_OPERAND (t, 0); pp_cxx_ws_string (pp, "typeid"); pp_cxx_left_paren (pp); if (TYPE_P (t)) pp->type_id (t); else pp->expression (t); pp_cxx_right_paren (pp); } void pp_cxx_va_arg_expression (cxx_pretty_printer *pp, tree t) { pp_cxx_ws_string (pp, "va_arg"); pp_cxx_left_paren (pp); 
pp->assignment_expression (TREE_OPERAND (t, 0));
  pp_cxx_separate_with (pp, ',');
  pp->type_id (TREE_TYPE (t));
  pp_cxx_right_paren (pp);
}

/* Helper for pp_cxx_offsetof_expression.  Print the "type, member-designator"
   part of the offsetof expression T when it has the expected shape.  Return
   true on success; false tells the caller to fall back to printing T as a
   plain expression.  */

static bool
pp_cxx_offsetof_expression_1 (cxx_pretty_printer *pp, tree t)
{
  switch (TREE_CODE (t))
    {
    case ARROW_EXPR:
      /* The base object: a static_cast whose type is a pointer or
         reference; print the pointed-to type as the first argument.  */
      if (TREE_CODE (TREE_OPERAND (t, 0)) == STATIC_CAST_EXPR
          && INDIRECT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0))))
        {
          pp->type_id (TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0))));
          pp_cxx_separate_with (pp, ',');
          return true;
        }
      return false;
    case COMPONENT_REF:
      if (!pp_cxx_offsetof_expression_1 (pp, TREE_OPERAND (t, 0)))
        return false;
      /* No '.' right after the "type," introduced by the ARROW_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (t, 0)) != ARROW_EXPR)
        pp_cxx_dot (pp);
      pp->expression (TREE_OPERAND (t, 1));
      return true;
    case ARRAY_REF:
      if (!pp_cxx_offsetof_expression_1 (pp, TREE_OPERAND (t, 0)))
        return false;
      pp_left_bracket (pp);
      pp->expression (TREE_OPERAND (t, 1));
      pp_right_bracket (pp);
      return true;
    default:
      return false;
    }
}

/* Print T as an offsetof ( type-id , member-designator ) expression.  */

void
pp_cxx_offsetof_expression (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "offsetof");
  pp_cxx_left_paren (pp);
  if (!pp_cxx_offsetof_expression_1 (pp, TREE_OPERAND (t, 0)))
    pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_right_paren (pp);
}

/* Print T as a call to __builtin_addressof.  */

void
pp_cxx_addressof_expression (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "__builtin_addressof");
  pp_cxx_left_paren (pp);
  pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_right_paren (pp);
}

/* Return the spelling of the operator of the fold expression T.  */

static char const*
get_fold_operator (tree t)
{
  int op = int_cst_value (FOLD_EXPR_OP (t));
  ovl_op_info_t *info = OVL_OP_INFO (FOLD_EXPR_MODIFY_P (t), op);
  return info->name;
}

/* Print the unary left fold T as "(... op expr)".  */

void
pp_cxx_unary_left_fold_expression (cxx_pretty_printer *pp, tree t)
{
  char const* op = get_fold_operator (t);
  tree expr = PACK_EXPANSION_PATTERN (FOLD_EXPR_PACK (t));
  pp_cxx_left_paren (pp);
  pp_cxx_ws_string (pp, "...");
  pp_cxx_ws_string (pp, op);
  pp->expression (expr);
  pp_cxx_right_paren (pp);
}

/* Print the unary right fold T as "(expr op ...)".  */

void
pp_cxx_unary_right_fold_expression (cxx_pretty_printer *pp, tree t)
{
  char const* op = get_fold_operator (t);
  tree expr = PACK_EXPANSION_PATTERN
(FOLD_EXPR_PACK (t));
  pp_cxx_left_paren (pp);
  pp->expression (expr);
  pp_space (pp);
  pp_cxx_ws_string (pp, op);
  pp_cxx_ws_string (pp, "...");
  pp_cxx_right_paren (pp);
}

/* Print the binary fold T as "(e1 op ... op e2)".  Exactly one of the two
   operands is the pack; that operand is replaced by its expansion
   pattern.  */

void
pp_cxx_binary_fold_expression (cxx_pretty_printer *pp, tree t)
{
  char const* op = get_fold_operator (t);
  tree t1 = TREE_OPERAND (t, 1);
  tree t2 = TREE_OPERAND (t, 2);
  if (t1 == FOLD_EXPR_PACK (t))
    t1 = PACK_EXPANSION_PATTERN (t1);
  else
    t2 = PACK_EXPANSION_PATTERN (t2);
  pp_cxx_left_paren (pp);
  pp->expression (t1);
  pp_cxx_ws_string (pp, op);
  pp_cxx_ws_string (pp, "...");
  pp_cxx_ws_string (pp, op);
  pp->expression (t2);
  pp_cxx_right_paren (pp);
}

/* Print the type-trait expression T using the built-in keyword for its
   trait kind, followed by its parenthesized type argument(s).  */

void
pp_cxx_trait_expression (cxx_pretty_printer *pp, tree t)
{
  cp_trait_kind kind = TRAIT_EXPR_KIND (t);

  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
      pp_cxx_ws_string (pp, "__has_nothrow_assign");
      break;
    case CPTK_HAS_TRIVIAL_ASSIGN:
      pp_cxx_ws_string (pp, "__has_trivial_assign");
      break;
    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
      pp_cxx_ws_string (pp, "__has_nothrow_constructor");
      break;
    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
      pp_cxx_ws_string (pp, "__has_trivial_constructor");
      break;
    case CPTK_HAS_NOTHROW_COPY:
      pp_cxx_ws_string (pp, "__has_nothrow_copy");
      break;
    case CPTK_HAS_TRIVIAL_COPY:
      pp_cxx_ws_string (pp, "__has_trivial_copy");
      break;
    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
      pp_cxx_ws_string (pp, "__has_trivial_destructor");
      break;
    case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS:
      pp_cxx_ws_string (pp, "__has_unique_object_representations");
      break;
    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
      pp_cxx_ws_string (pp, "__has_virtual_destructor");
      break;
    case CPTK_IS_ABSTRACT:
      pp_cxx_ws_string (pp, "__is_abstract");
      break;
    case CPTK_IS_AGGREGATE:
      pp_cxx_ws_string (pp, "__is_aggregate");
      break;
    case CPTK_IS_BASE_OF:
      pp_cxx_ws_string (pp, "__is_base_of");
      break;
    case CPTK_IS_CLASS:
      pp_cxx_ws_string (pp, "__is_class");
      break;
    case CPTK_IS_EMPTY:
      pp_cxx_ws_string (pp, "__is_empty");
      break;
    case CPTK_IS_ENUM:
      pp_cxx_ws_string (pp, "__is_enum");
      break;
    case CPTK_IS_FINAL:
      pp_cxx_ws_string (pp,
"__is_final");
      break;
    case CPTK_IS_POD:
      pp_cxx_ws_string (pp, "__is_pod");
      break;
    case CPTK_IS_POLYMORPHIC:
      pp_cxx_ws_string (pp, "__is_polymorphic");
      break;
    case CPTK_IS_SAME_AS:
      pp_cxx_ws_string (pp, "__is_same");
      break;
    case CPTK_IS_STD_LAYOUT:
      pp_cxx_ws_string (pp, "__is_std_layout");
      break;
    case CPTK_IS_TRIVIAL:
      pp_cxx_ws_string (pp, "__is_trivial");
      break;
    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
      pp_cxx_ws_string (pp, "__is_trivially_assignable");
      break;
    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      pp_cxx_ws_string (pp, "__is_trivially_constructible");
      break;
    case CPTK_IS_TRIVIALLY_COPYABLE:
      pp_cxx_ws_string (pp, "__is_trivially_copyable");
      break;
    case CPTK_IS_UNION:
      pp_cxx_ws_string (pp, "__is_union");
      break;
    case CPTK_IS_LITERAL_TYPE:
      pp_cxx_ws_string (pp, "__is_literal_type");
      break;
    case CPTK_IS_ASSIGNABLE:
      pp_cxx_ws_string (pp, "__is_assignable");
      break;
    case CPTK_IS_CONSTRUCTIBLE:
      pp_cxx_ws_string (pp, "__is_constructible");
      break;
    default:
      gcc_unreachable ();
    }

  /* Every trait takes at least one type argument; __is_base_of and
     __is_same take a second.  */
  pp_cxx_left_paren (pp);
  pp->type_id (TRAIT_EXPR_TYPE1 (t));
  if (kind == CPTK_IS_BASE_OF || kind == CPTK_IS_SAME_AS)
    {
      pp_cxx_separate_with (pp, ',');
      pp->type_id (TRAIT_EXPR_TYPE2 (t));
    }
  pp_cxx_right_paren (pp);
}

// requires-clause:
//    'requires' logical-or-expression
void
pp_cxx_requires_clause (cxx_pretty_printer *pp, tree t)
{
  if (!t)
    return;
  pp->padding = pp_before;
  pp_cxx_ws_string (pp, "requires");
  pp_space (pp);
  pp->expression (t);
}

/* requirement:
     simple-requirement
     compound-requirement
     type-requirement
     nested-requirement  */

static void
pp_cxx_requirement (cxx_pretty_printer *pp, tree t)
{
  switch (TREE_CODE (t))
    {
    case SIMPLE_REQ:
      pp_cxx_simple_requirement (pp, t);
      break;
    case TYPE_REQ:
      pp_cxx_type_requirement (pp, t);
      break;
    case COMPOUND_REQ:
      pp_cxx_compound_requirement (pp, t);
      break;
    case NESTED_REQ:
      pp_cxx_nested_requirement (pp, t);
      break;
    default:
      gcc_unreachable ();
    }
}

// requirement-list:
//    requirement
//    requirement-list ';' requirement[opt]
//
static void pp_cxx_requirement_list
(cxx_pretty_printer *pp, tree t)
{
  /* Walk the TREE_LIST chain of requirements; each requirement printer
     emits its own trailing ';'.  */
  for (; t; t = TREE_CHAIN (t))
    pp_cxx_requirement (pp, TREE_VALUE (t));
}

// requirement-body:
//    '{' requirement-list '}'
static void
pp_cxx_requirement_body (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_left_brace (pp);
  pp_cxx_requirement_list (pp, t);
  pp_cxx_right_brace (pp);
}

// requires-expression:
//    'requires' requirement-parameter-list requirement-body
void
pp_cxx_requires_expr (cxx_pretty_printer *pp, tree t)
{
  pp_string (pp, "requires");
  /* Operand 0 is the optional parameter list; operand 1 the body.  */
  if (tree parms = TREE_OPERAND (t, 0))
    {
      pp_cxx_parameter_declaration_clause (pp, parms);
      pp_cxx_whitespace (pp);
    }
  pp_cxx_requirement_body (pp, TREE_OPERAND (t, 1));
}

/* simple-requirement:
     expression ';' */

void
pp_cxx_simple_requirement (cxx_pretty_printer *pp, tree t)
{
  pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_semicolon (pp);
}

/* type-requirement:
     typename type-name ';' */

void
pp_cxx_type_requirement (cxx_pretty_printer *pp, tree t)
{
  pp->type_id (TREE_OPERAND (t, 0));
  pp_cxx_semicolon (pp);
}

/* compound-requirement:
     '{' expression '}' 'noexcept' [opt] trailing-return-type [opt] */

void
pp_cxx_compound_requirement (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_left_brace (pp);
  pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_right_brace (pp);

  if (COMPOUND_REQ_NOEXCEPT_P (t))
    pp_cxx_ws_string (pp, "noexcept");

  /* Operand 1, if present, is the trailing return type.  */
  if (tree type = TREE_OPERAND (t, 1))
    {
      pp_cxx_whitespace (pp);
      pp_cxx_ws_string (pp, "->");
      pp->type_id (type);
    }
  pp_cxx_semicolon (pp);
}

/* nested requirement:
     'requires' constraint-expression */

void
pp_cxx_nested_requirement (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "requires");
  pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_semicolon (pp);
}

/* Print a concept check as the concept name applied to its template
   arguments.  */

void
pp_cxx_check_constraint (cxx_pretty_printer *pp, tree t)
{
  tree decl = CHECK_CONSTR_CONCEPT (t);
  tree tmpl = DECL_TI_TEMPLATE (decl);
  tree args = CHECK_CONSTR_ARGS (t);
  tree id = build_nt (TEMPLATE_ID_EXPR, tmpl, args);

  if (TREE_CODE (decl) == CONCEPT_DECL)
    pp->expression (id);
  else if (VAR_P (decl))
    pp->expression (id);
else if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Function concept: print it as a call expression on the
	 template-id.  */
      tree call = build_vl_exp (CALL_EXPR, 2);
      TREE_OPERAND (call, 0) = integer_two_node;
      TREE_OPERAND (call, 1) = id;
      pp->expression (call);
    }
  else
    gcc_unreachable ();
}

/* Output the "[with ...]" clause for a parameter mapping of an atomic
   constraint.   */

void
pp_cxx_parameter_mapping (cxx_pretty_printer *pp, tree map)
{
  pp_cxx_whitespace (pp);
  pp_cxx_left_bracket (pp);
  pp->translate_string ("with");
  pp_cxx_whitespace (pp);

  /* MAP is a TREE_LIST: TREE_VALUE is the template parameter,
     TREE_PURPOSE the argument it maps to.  */
  for (tree p = map; p; p = TREE_CHAIN (p))
    {
      tree parm = TREE_VALUE (p);
      tree arg = TREE_PURPOSE (p);

      if (TYPE_P (parm))
	pp->type_id (parm);
      else
	pp_cxx_tree_identifier (pp, DECL_NAME (TEMPLATE_PARM_DECL (parm)));

      pp_cxx_whitespace (pp);
      pp_equal (pp);
      pp_cxx_whitespace (pp);

      if (TYPE_P (arg) || DECL_TEMPLATE_TEMPLATE_PARM_P (arg))
	pp->type_id (arg);
      else
	pp->expression (arg);

      if (TREE_CHAIN (p) != NULL_TREE)
	pp_cxx_separate_with (pp, ';');
    }

  pp_cxx_right_bracket (pp);
}

/* Print an atomic constraint: its expression followed by the
   parameter mapping, if any.  */

void
pp_cxx_atomic_constraint (cxx_pretty_printer *pp, tree t)
{
  /* Emit the expression.  */
  pp->expression (ATOMIC_CONSTR_EXPR (t));

  /* Emit the parameter mapping.  */
  tree map = ATOMIC_CONSTR_MAP (t);
  if (map && map != error_mark_node)
    pp_cxx_parameter_mapping (pp, map);
}

void
pp_cxx_conjunction (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_constraint (pp, TREE_OPERAND (t, 0));
  pp_string (pp, " /\\ ");
  pp_cxx_constraint (pp, TREE_OPERAND (t, 1));
}

void
pp_cxx_disjunction (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_constraint (pp, TREE_OPERAND (t, 0));
  pp_string (pp, " \\/ ");
  pp_cxx_constraint (pp, TREE_OPERAND (t, 1));
}

/* Dispatch on the kind of constraint tree and print it.  */

void
pp_cxx_constraint (cxx_pretty_printer *pp, tree t)
{
  if (t == error_mark_node)
    return pp->expression (t);

  switch (TREE_CODE (t))
    {
    case ATOMIC_CONSTR:
      pp_cxx_atomic_constraint (pp, t);
      break;

    case CHECK_CONSTR:
      pp_cxx_check_constraint (pp, t);
      break;

    case CONJ_CONSTR:
      pp_cxx_conjunction (pp, t);
      break;

    case DISJ_CONSTR:
      pp_cxx_disjunction (pp, t);
      break;

    case EXPR_PACK_EXPANSION:
      pp->expression (TREE_OPERAND (t, 0));
      break;

    default:
      gcc_unreachable ();
    }
}


typedef c_pretty_print_fn pp_fun;

/* Initialization of a C++ pretty-printer object.  */

cxx_pretty_printer::cxx_pretty_printer ()
  : c_pretty_printer (),
    enclosing_scope (global_namespace)
{
  type_specifier_seq = (pp_fun) pp_cxx_type_specifier_seq;
  parameter_list = (pp_fun) pp_cxx_parameter_declaration_clause;
}

/* cxx_pretty_printer's implementation of pretty_printer::clone vfunc.  */

pretty_printer *
cxx_pretty_printer::clone () const
{
  return new cxx_pretty_printer (*this);
}
GB_binop__bshift_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bshift_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__bshift_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__bshift_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint16) // C=scalar+B GB (_bind1st__bshift_uint16) // C=scalar+B' GB (_bind1st_tran__bshift_uint16) // C=A+scalar GB (_bind2nd__bshift_uint16) // C=A'+scalar GB (_bind2nd_tran__bshift_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: int8_t // B pattern? 
0 // BinaryOp: cij = GB_bitshift_uint16 (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 0 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_bitshift_uint16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bshift_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t 
*restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bshift_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bshift_uint16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bshift_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bshift_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_bitshift_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bshift_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_bitshift_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__bshift_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_bitshift_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__bshift_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-12,16),ceild(4*t2-Nz-19,32));t3<=min(min(floord(4*Nt+Ny-9,32),floord(2*t1+Ny-3,32)),floord(4*t2+Ny-9,32));t3++) { for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(32*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(32*t3+Nx+19,128));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
pyField.h
#pragma once #include <memory> #include "Grid.h" #include "FieldValue.h" #include "Mapping.h" #include "Fdtd.h" #include "Psatd.h" #include "Pstd.h" #include "Mapping.h" #include "pybind11/pybind11.h" namespace py = pybind11; using namespace pybind11::literals; namespace pfc { template <class TGrid, class TFieldSolver> class pyFieldEntity : public TGrid, public TFieldSolver { public: pyFieldEntity(const Int3 & numInternalCells, const FP3 & minCoords, const FP3 & steps, FP dt) : TGrid(Int3(numInternalCells), minCoords, steps, numInternalCells), TFieldSolver(static_cast<TGrid*>(this), dt) {} void refresh() { this->globalTime = 0.0; } }; template <class TGrid, class TFieldSolver, class TDerived, bool ifStraggered> class pyStraggeredFieldIntarface {}; // spatial straggered grids template <class TGrid, class TFieldSolver, class TDerived> class pyStraggeredFieldIntarface<TGrid, TFieldSolver, TDerived, true> { public: template <class FieldConfigurationType> void setFieldConfiguration(const FieldConfigurationType* fieldConf) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); const int chunkSize = 32; const int nChunks = fieldEntity->numCells.z / chunkSize; const int chunkRem = fieldEntity->numCells.z % chunkSize; const int nx = fieldEntity->numCells.x, ny = fieldEntity->numCells.y; #pragma omp parallel for collapse(2) for (int i = 0; i < nx; i++) for (int j = 0; j < ny; j++) for (int chunk = 0; chunk < nChunks + 1; chunk++) { FP3 cEx[chunkSize], cEy[chunkSize], cEz[chunkSize]; FP3 cBx[chunkSize], cBy[chunkSize], cBz[chunkSize]; int kLast = chunk == nChunks ? 
chunkRem : chunkSize; #pragma ivdep for (int k = 0; k < kLast; k++) { cEx[k] = derived->convertCoords(fieldEntity->ExPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftE); cEy[k] = derived->convertCoords(fieldEntity->EyPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftE); cEz[k] = derived->convertCoords(fieldEntity->EzPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftE); cBx[k] = derived->convertCoords(fieldEntity->BxPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftB); cBy[k] = derived->convertCoords(fieldEntity->ByPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftB); cBz[k] = derived->convertCoords(fieldEntity->BzPosition(i, j, chunk * chunkSize), fieldEntity->timeShiftB); } #pragma ivdep #pragma omp simd for (int k = 0; k < kLast; k++) { fieldEntity->Ex(i, j, k) = fieldConf->getE(cEx[k].x, cEx[k].y, cEx[k].z).x; fieldEntity->Ey(i, j, k) = fieldConf->getE(cEy[k].x, cEy[k].y, cEy[k].z).y; fieldEntity->Ez(i, j, k) = fieldConf->getE(cEz[k].x, cEz[k].y, cEz[k].z).z; fieldEntity->Bx(i, j, k) = fieldConf->getB(cBx[k].x, cBx[k].y, cBx[k].z).x; fieldEntity->By(i, j, k) = fieldConf->getB(cBy[k].x, cBy[k].y, cBy[k].z).y; fieldEntity->Bz(i, j, k) = fieldConf->getB(cBz[k].x, cBz[k].y, cBz[k].z).z; } } } }; // collocated grids template <class TGrid, class TFieldSolver, class TDerived> class pyStraggeredFieldIntarface<TGrid, TFieldSolver, TDerived, false> { public: template <class FieldConfigurationType> void setFieldConfiguration(const FieldConfigurationType* fieldConf) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); const int chunkSize = 32; const int nChunks = fieldEntity->numCells.z / chunkSize; const int chunkRem = fieldEntity->numCells.z % chunkSize; const int nx = fieldEntity->numCells.x, ny = fieldEntity->numCells.y; #pragma omp parallel for collapse(2) for (int i = 0; i < nx; i++) for (int j = 0; j < ny; j++) for (int chunk = 0; chunk < nChunks + 1; 
chunk++) { FP3 coords[chunkSize]; int kLast = chunk == nChunks ? chunkRem : chunkSize; FP3 startPosition = fieldEntity->ExPosition(i, j, chunk * chunkSize); #pragma ivdep for (int k = 0; k < kLast; k++) { FP3 position(startPosition.x, startPosition.y, startPosition.z + k * fieldEntity->steps.z); coords[k] = derived->convertCoords(position); } #pragma ivdep #pragma omp simd for (int k = 0; k < kLast; k++) { FP3 E, B; fieldConf->getEB(coords[k].x, coords[k].y, coords[k].z, &E, &B); fieldEntity->Ex(i, j, k + chunk * chunkSize) = E.x; fieldEntity->Ey(i, j, k + chunk * chunkSize) = E.y; fieldEntity->Ez(i, j, k + chunk * chunkSize) = E.z; fieldEntity->Bx(i, j, k + chunk * chunkSize) = B.x; fieldEntity->By(i, j, k + chunk * chunkSize) = B.y; fieldEntity->Bz(i, j, k + chunk * chunkSize) = B.z; } } } void pySetEMField(py::function fValueField) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 coords = derived->convertCoords(fieldEntity->ExPosition(i, j, k)); ValueField field = fValueField("x"_a = coords.x, "y"_a = coords.y, "z"_a = coords.z). 
template cast<ValueField>(); fieldEntity->Ex(i, j, k) = field.E.x; fieldEntity->Ey(i, j, k) = field.E.y; fieldEntity->Ez(i, j, k) = field.E.z; fieldEntity->Bx(i, j, k) = field.B.x; fieldEntity->By(i, j, k) = field.B.y; fieldEntity->Bz(i, j, k) = field.B.z; } } void setEMField(int64_t _fValueField) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); void(*fValueField)(FP, FP, FP, FP*) = (void(*)(FP, FP, FP, FP*))_fValueField; const int chunkSize = 32; const int nChunks = fieldEntity->numCells.z / chunkSize; const int chunkRem = fieldEntity->numCells.z % chunkSize; const int nx = fieldEntity->numCells.x, ny = fieldEntity->numCells.y; #pragma omp parallel for collapse(2) for (int i = 0; i < nx; i++) for (int j = 0; j < ny; j++) for (int chunk = 0; chunk < nChunks + 1; chunk++) { FP3 coords[chunkSize]; int kLast = chunk == nChunks ? chunkRem : chunkSize; FP3 startPosition = fieldEntity->ExPosition(i, j, chunk * chunkSize); #pragma ivdep for (int k = 0; k < kLast; k++) { FP3 position(startPosition.x, startPosition.y, startPosition.z + k * fieldEntity->steps.z); coords[k] = derived->convertCoords(position); } #pragma ivdep #pragma omp simd for (int k = 0; k < kLast; k++) { ValueField field(0.0, 0.0, 0.0, 0.0, 0.0, 0.0); fValueField(coords[k].x, coords[k].y, coords[k].z, &(field.E.x)); fieldEntity->Ex(i, j, k + chunk * chunkSize) = field.E.x; fieldEntity->Ey(i, j, k + chunk * chunkSize) = field.E.y; fieldEntity->Ez(i, j, k + chunk * chunkSize) = field.E.z; fieldEntity->Bx(i, j, k + chunk * chunkSize) = field.B.x; fieldEntity->By(i, j, k + chunk * chunkSize) = field.B.y; fieldEntity->Bz(i, j, k + chunk * chunkSize) = field.B.z; } } } }; template<class TGrid, class TFieldSolver, class TDerived> class pyFieldGridInterface : public pyStraggeredFieldIntarface<TGrid, TFieldSolver, TDerived, TGrid::ifFieldsSpatialStraggered && TGrid::ifFieldsTimeStraggered> { public: pyFieldGridInterface() { fEt[0] = 0; 
fEt[1] = 0; fEt[2] = 0; fBt[0] = 0; fBt[1] = 0; fBt[2] = 0; isAnalytical = false; } void setAnalytical(int64_t _fEx, int64_t _fEy, int64_t _fEz, int64_t _fBx, int64_t _fBy, int64_t _fBz) { fEt[0] = _fEx; fEt[1] = _fEy; fEt[2] = _fEz; fBt[0] = _fBx; fBt[1] = _fBy; fBt[2] = _fBz; isAnalytical = true; } void analyticalUpdateFields(FP t) { if (isAnalytical) { setExyzt(fEt[0], fEt[1], fEt[2], t); setBxyzt(fBt[0], fBt[1], fBt[2], t); } } void pySetExyz(py::function fEx, py::function fEy, py::function fEz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fEx("x"_a = cEx.x, "y"_a = cEx.y, "z"_a = cEx.z).template cast<FP>(); fieldEntity->Ey(i, j, k) = fEy("x"_a = cEy.x, "y"_a = cEy.y, "z"_a = cEy.z).template cast<FP>(); fieldEntity->Ez(i, j, k) = fEz("x"_a = cEz.x, "y"_a = cEz.y, "z"_a = cEz.z).template cast<FP>(); } } void pySetE(py::function fE) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fE("x"_a 
= cEx.x, "y"_a = cEx.y, "z"_a = cEx.z).template cast<FP3>().x; fieldEntity->Ey(i, j, k) = fE("x"_a = cEy.x, "y"_a = cEy.y, "z"_a = cEy.z).template cast<FP3>().y; fieldEntity->Ez(i, j, k) = fE("x"_a = cEz.x, "y"_a = cEz.y, "z"_a = cEz.z).template cast<FP3>().z; } } void setExyz(int64_t _fEx, int64_t _fEy, int64_t _fEz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fEx)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fEx; FP(*fEy)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fEy; FP(*fEz)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fEz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fEx(cEx.x, cEx.y, cEx.z); fieldEntity->Ey(i, j, k) = fEy(cEy.x, cEy.y, cEy.z); fieldEntity->Ez(i, j, k) = fEz(cEz.x, cEz.y, cEz.z); } } void setExyzt(int64_t _fEx, int64_t _fEy, int64_t _fEz, FP t) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fEx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fEx; FP(*fEy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fEy; FP(*fEz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fEz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = 
derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fEx(cEx.x, cEx.y, cEx.z, t + fieldEntity->timeShiftE); fieldEntity->Ey(i, j, k) = fEy(cEy.x, cEy.y, cEy.z, t + fieldEntity->timeShiftE); fieldEntity->Ez(i, j, k) = fEz(cEz.x, cEz.y, cEz.z, t + fieldEntity->timeShiftE); } } void setE(int64_t _fE) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP3(*fE)(FP, FP, FP) = (FP3(*)(FP, FP, FP))_fE; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cEx, cEy, cEz; cEx = derived->convertCoords(fieldEntity->ExPosition(i, j, k), fieldEntity->timeShiftE); cEy = derived->convertCoords(fieldEntity->EyPosition(i, j, k), fieldEntity->timeShiftE); cEz = derived->convertCoords(fieldEntity->EzPosition(i, j, k), fieldEntity->timeShiftE); fieldEntity->Ex(i, j, k) = fE(cEx.x, cEx.y, cEx.z).x; fieldEntity->Ey(i, j, k) = fE(cEy.x, cEy.y, cEy.z).y; fieldEntity->Ez(i, j, k) = fE(cEz.x, cEz.y, cEz.z).z; } } void pySetBxyz(py::function fBx, py::function fBy, py::function fBz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fBx("x"_a = cBx.x, "y"_a = cBx.y, "z"_a = cBx.z).template cast<FP>(); fieldEntity->By(i, j, k) = fBy("x"_a = cBy.x, "y"_a = cBy.y, "z"_a = cBy.z).template cast<FP>(); 
fieldEntity->Bz(i, j, k) = fBz("x"_a = cBz.x, "y"_a = cBz.y, "z"_a = cBz.z).template cast<FP>(); } } void pySetB(py::function fB) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fB("x"_a = cBx.x, "y"_a = cBx.y, "z"_a = cBx.z).template cast<FP3>().x; fieldEntity->By(i, j, k) = fB("x"_a = cBy.x, "y"_a = cBy.y, "z"_a = cBy.z).template cast<FP3>().y; fieldEntity->Bz(i, j, k) = fB("x"_a = cBz.x, "y"_a = cBz.y, "z"_a = cBz.z).template cast<FP3>().z; } } void setBxyz(int64_t _fBx, int64_t _fBy, int64_t _fBz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fBx)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fBx; FP(*fBy)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fBy; FP(*fBz)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fBz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fBx(cBx.x, cBx.y, cBx.z); fieldEntity->By(i, j, k) = fBy(cBy.x, cBy.y, cBy.z); fieldEntity->Bz(i, j, k) = fBz(cBz.x, cBz.y, cBz.z); } } void setBxyzt(int64_t _fBx, int64_t _fBy, 
int64_t _fBz, FP t) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fBx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fBx; FP(*fBy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fBy; FP(*fBz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fBz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fBx(cBx.x, cBx.y, cBx.z, t + fieldEntity->timeShiftB); fieldEntity->By(i, j, k) = fBy(cBy.x, cBy.y, cBy.z, t + fieldEntity->timeShiftB); fieldEntity->Bz(i, j, k) = fBz(cBz.x, cBz.y, cBz.z, t + fieldEntity->timeShiftB); } } void setB(int64_t _fB) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP3(*fB)(FP, FP, FP) = (FP3(*)(FP, FP, FP))_fB; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cBx, cBy, cBz; cBx = derived->convertCoords(fieldEntity->BxPosition(i, j, k), fieldEntity->timeShiftB); cBy = derived->convertCoords(fieldEntity->ByPosition(i, j, k), fieldEntity->timeShiftB); cBz = derived->convertCoords(fieldEntity->BzPosition(i, j, k), fieldEntity->timeShiftB); fieldEntity->Bx(i, j, k) = fB(cBx.x, cBx.y, cBx.z).x; fieldEntity->By(i, j, k) = fB(cBy.x, cBy.y, cBy.z).y; fieldEntity->Bz(i, j, k) = fB(cBz.x, cBz.y, cBz.z).z; } } void pySetJxyz(py::function fJx, py::function fJy, py::function fJz) { TDerived* derived = static_cast<TDerived*>(this); 
pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJx("x"_a = cJx.x, "y"_a = cJx.y, "z"_a = cJx.z).template cast<FP>(); fieldEntity->Jy(i, j, k) = fJy("x"_a = cJy.x, "y"_a = cJy.y, "z"_a = cJy.z).template cast<FP>(); fieldEntity->Jz(i, j, k) = fJz("x"_a = cJz.x, "y"_a = cJz.y, "z"_a = cJz.z).template cast<FP>(); } } void pySetJ(py::function fJ) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJ("x"_a = cJx.x, "y"_a = cJx.y, "z"_a = cJx.z).template cast<FP3>().x; fieldEntity->Jy(i, j, k) = fJ("x"_a = cJy.x, "y"_a = cJy.y, "z"_a = cJy.z).template cast<FP3>().y; fieldEntity->Jz(i, j, k) = fJ("x"_a = cJz.x, "y"_a = cJz.y, "z"_a = cJz.z).template cast<FP3>().z; } } void setJxyz(int64_t _fJx, int64_t _fJy, int64_t _fJz) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fJx)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fJx; FP(*fJy)(FP, FP, FP) = (FP(*)(FP, FP, FP))_fJy; FP(*fJz)(FP, 
FP, FP) = (FP(*)(FP, FP, FP))_fJz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJx(cJx.x, cJx.y, cJx.z); fieldEntity->Jy(i, j, k) = fJy(cJy.x, cJy.y, cJy.z); fieldEntity->Jz(i, j, k) = fJz(cJz.x, cJz.y, cJz.z); } } void setJxyzt(int64_t _fJx, int64_t _fJy, int64_t _fJz, FP t) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP(*fJx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fJx; FP(*fJy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fJy; FP(*fJz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))_fJz; #pragma omp parallel for for (int i = 0; i < fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJx(cJx.x, cJx.y, cJx.z, t + fieldEntity->timeShiftJ); fieldEntity->Jy(i, j, k) = fJy(cJy.x, cJy.y, cJy.z, t + fieldEntity->timeShiftJ); fieldEntity->Jz(i, j, k) = fJz(cJz.x, cJz.y, cJz.z, t + fieldEntity->timeShiftJ); } } void setJ(int64_t _fJ) { TDerived* derived = static_cast<TDerived*>(this); pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = derived->getFieldEntity(); FP3(*fJ)(FP, FP, FP) = (FP3(*)(FP, FP, FP))_fJ; #pragma omp parallel for for (int i = 0; i < 
fieldEntity->numCells.x; i++) for (int j = 0; j < fieldEntity->numCells.y; j++) for (int k = 0; k < fieldEntity->numCells.z; k++) { FP3 cJx, cJy, cJz; cJx = derived->convertCoords(fieldEntity->JxPosition(i, j, k), fieldEntity->timeShiftJ); cJy = derived->convertCoords(fieldEntity->JyPosition(i, j, k), fieldEntity->timeShiftJ); cJz = derived->convertCoords(fieldEntity->JzPosition(i, j, k), fieldEntity->timeShiftJ); fieldEntity->Jx(i, j, k) = fJ(cJx.x, cJx.y, cJx.z).x; fieldEntity->Jy(i, j, k) = fJ(cJy.x, cJy.y, cJy.z).y; fieldEntity->Jz(i, j, k) = fJ(cJz.x, cJz.y, cJz.z).z; } } FP3 getE(const FP3& coords) const { pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = static_cast<const TDerived*>(this)->getFieldEntity(); FP3 result; if (isAnalytical) { FP(*fx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fEt[0]; FP(*fy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fEt[1]; FP(*fz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fEt[2]; FP time = fieldEntity->globalTime + fieldEntity->timeShiftE; result[0] = fx(coords.x, coords.y, coords.z, time); result[1] = fy(coords.x, coords.y, coords.z, time); result[2] = fz(coords.x, coords.y, coords.z, time); } else { result = fieldEntity->getE(coords); } return result; } FP3 getB(const FP3& coords) const { pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = static_cast<const TDerived*>(this)->getFieldEntity(); FP3 result; if (isAnalytical) { FP(*fx)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fBt[0]; FP(*fy)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fBt[1]; FP(*fz)(FP, FP, FP, FP) = (FP(*)(FP, FP, FP, FP))fBt[2]; FP time = fieldEntity->globalTime + fieldEntity->timeShiftB; result[0] = fx(coords.x, coords.y, coords.z, time); result[1] = fy(coords.x, coords.y, coords.z, time); result[2] = fz(coords.x, coords.y, coords.z, time); } else { result = fieldEntity->getB(coords); } return result; } FP3 getJ(const FP3& coords) const { return static_cast<const TDerived*>(this)->getFieldEntity()->getJ(coords); } void getFields(const FP3& coords, FP3& e, FP3& b) 
const { static_cast<const TDerived*>(this)->getFieldEntity()->getFields(coords, e, b); } private: int64_t fEt[3], fBt[3]; bool isAnalytical; }; template <class TGrid, class TFieldSolver, class TDerived, bool> class pyPoissonFieldSolverInterface {}; template <class TGrid, class TFieldSolver, class TDerived> class pyPoissonFieldSolverInterface<TGrid, TFieldSolver, TDerived, true> { public: void convertFieldsPoissonEquation() { static_cast<TDerived*>(this)->getFieldEntity()->convertFieldsPoissonEquation(); } }; template <class TGrid, class TFieldSolver, class TDerived> class pyPoissonFieldSolverInterface<TGrid, TFieldSolver, TDerived, false> { public: void convertFieldsPoissonEquation() { std::cout << "WARNING: the used field does not include the 'convertFieldsPoissonEquation' method" << std::endl; } }; template <class TGrid, class TFieldSolver, class TDerived, bool> class pyFieldGeneratorSolverInterface {}; template <class TGrid, class TFieldSolver, class TDerived> class pyFieldGeneratorSolverInterface<TGrid, TFieldSolver, TDerived, true> { public: void setFieldGenerator(FieldGenerator<TGrid::gridType>* generator) { static_cast<TDerived*>(this)->getFieldEntity()->setFieldGenerator(generator); } }; template <class TGrid, class TFieldSolver, class TDerived> class pyFieldGeneratorSolverInterface<TGrid, TFieldSolver, TDerived, false> { public: void setFieldGenerator(FieldGenerator<TGrid::gridType>* generator) { std::cout << "WARNING: the used field does not include the 'setFieldGenerator' method" << std::endl; } }; template<class TGrid, class TFieldSolver, class TDerived> class pyFieldSolverInterface : public pyPoissonFieldSolverInterface<TGrid, TFieldSolver, TDerived, std::is_same<TFieldSolver, PSATD>::value || std::is_same<TFieldSolver, PSATDPoisson>::value || std::is_same<TFieldSolver, PSATDTimeStraggered>::value || std::is_same<TFieldSolver, PSATDTimeStraggeredPoisson>::value>, public pyFieldGeneratorSolverInterface<TGrid, TFieldSolver, TDerived, 
std::is_same<TFieldSolver, FDTD>::value> { public: void setTime(FP time) { static_cast<TDerived*>(this)->getFieldEntity()->globalTime = time; } FP getTime() { return static_cast<TDerived*>(this)->getFieldEntity()->globalTime; } void setPML(int sizePMLx, int sizePMLy, int sizePMLz) { static_cast<TDerived*>(this)->getFieldEntity()->setPML(sizePMLx, sizePMLy, sizePMLz); } void changeTimeStep(double dt) { static_cast<TDerived*>(this)->getFieldEntity()->setTimeStep(dt); } void updateFields() { static_cast<TDerived*>(this)->getFieldEntity()->updateFields(); } void advance(FP dt) { pyFieldEntity<TGrid, TFieldSolver>* fieldEntity = static_cast<TDerived*>(this)->getFieldEntity(); FP oldDt = fieldEntity->dt; fieldEntity->setTimeStep(dt); fieldEntity->updateFields(); fieldEntity->setTimeStep(oldDt); } }; template<class TGrid, class TFieldSolver, class TDerived> class pyFieldInterface: public pyFieldGridInterface<TGrid, TFieldSolver, TDerived>, public pyFieldSolverInterface<TGrid, TFieldSolver, TDerived> { public: using BaseGridInterface = pyFieldGridInterface<TGrid, TFieldSolver, TDerived>; using BaseSolverInterface = pyFieldSolverInterface<TGrid, TFieldSolver, TDerived>; TGrid* getGrid() const { return static_cast<TGrid*>(static_cast<const TDerived*>(this)->getFieldEntity()); } TFieldSolver* getFieldSolver() const { return static_cast<TFieldSolver*>(static_cast<const TDerived*>(this)->getFieldEntity()); } void refresh() { static_cast<TDerived*>(this)->getFieldEntity()->refresh(); } }; class pyFieldBase { public: virtual FP3 getE(const FP3& coords) const = 0; virtual FP3 getB(const FP3& coords) const = 0; virtual FP3 getJ(const FP3& coords) const = 0; void getFields(const FP3& coords, FP3& e, FP3& b) const { e = getE(coords); b = getB(coords); } virtual void updateFields() = 0; virtual void advance(FP dt) = 0; virtual std::shared_ptr<pyFieldBase> applyMapping( const std::shared_ptr<pyFieldBase>& self, const std::shared_ptr<Mapping>& mapping) const = 0; }; template<class 
TGrid, class TFieldSolver> class pyField : public pyFieldInterface<TGrid, TFieldSolver, pyField<TGrid, TFieldSolver>>, public pyFieldBase { using BaseInterface = pyFieldInterface<TGrid, TFieldSolver, pyField<TGrid, TFieldSolver>>; public: pyField(const Int3 & numInternalCells, const FP3 & minCoords, const FP3 & steps, FP dt) : fieldEntity(new pyFieldEntity<TGrid, TFieldSolver>(numInternalCells, minCoords, steps, dt)) {} pyField(const std::shared_ptr<pyField<TGrid, TFieldSolver>>& other, const std::shared_ptr<Mapping>& mapping) : pyWrappedField(other), mapping(mapping) {} inline pyFieldEntity<TGrid, TFieldSolver>* getFieldEntity() const { if (fieldEntity) return fieldEntity.get(); return pyWrappedField->getFieldEntity(); } inline FP3 convertCoords(const FP3& coords, FP timeShift = 0.0) const { bool status = true; return getDirectCoords(coords, getFieldEntity()->globalTime + timeShift, &status); } std::shared_ptr<pyFieldBase> applyMapping( const std::shared_ptr<pyFieldBase>& self, const std::shared_ptr<Mapping>& mapping) const override { return std::static_pointer_cast<pyFieldBase>( std::make_shared<pyField<TGrid, TFieldSolver>>( std::static_pointer_cast<pyField<TGrid, TFieldSolver>>(self), mapping ) ); } inline FP3 getE(const FP3& coords) const override { bool status = true; FP time = getFieldEntity()->globalTime + getFieldEntity()->timeShiftE; FP3 inverseCoords = getInverseCoords(coords, time, &status); if (!status) return FP3(0, 0, 0); return BaseInterface::getE(inverseCoords); } inline FP3 getB(const FP3& coords) const override { bool status = true; FP time = getFieldEntity()->globalTime + getFieldEntity()->timeShiftB; FP3 inverseCoords = getInverseCoords(coords, time, &status); if (!status) return FP3(0, 0, 0); return BaseInterface::getB(inverseCoords); } FP3 getJ(const FP3& coords) const override { bool status = true; FP time = getFieldEntity()->globalTime + getFieldEntity()->timeShiftJ; FP3 inverseCoords = getInverseCoords(coords, time, &status); if (!status) 
return FP3(0, 0, 0); return BaseInterface::getJ(inverseCoords); } void updateFields() override { return BaseInterface::updateFields(); } void advance(FP dt) override { return BaseInterface::advance(dt); } protected: inline FP3 getDirectCoords(const FP3& coords, FP time, bool* status) const { FP3 coords_ = coords; *status = true; if (pyWrappedField) coords_ = pyWrappedField->getDirectCoords(coords_, time, status); bool status2 = true; if (mapping) coords_ = mapping->getDirectCoords(coords_, time, &status2); *status = *status && status2; return coords_; } inline FP3 getInverseCoords(const FP3& coords, FP time, bool* status) const { FP3 coords_ = coords; *status = true; if (pyWrappedField) coords_ = pyWrappedField->getInverseCoords(coords_, time, status); bool status2 = true; if (mapping) coords_ = mapping->getInverseCoords(coords_, time, &status2); *status = *status && status2; return coords_; } private: // the simple grid state // if fieldEntity!=0 then pyField is a memory owner std::unique_ptr<pyFieldEntity<TGrid, TFieldSolver>> fieldEntity; // the mapping grid state // if pyWrappedField!=0 then pyField is a wrapper std::shared_ptr<pyField<TGrid, TFieldSolver>> pyWrappedField; std::shared_ptr<Mapping> mapping; }; typedef pyField<YeeGrid, FDTD> pyYeeField; typedef pyField<PSTDGrid, PSTD> pyPSTDField; typedef pyField<PSATDGrid, PSATD> pyPSATDField; typedef pyField<PSATDGrid, PSATDPoisson> pyPSATDPoissonField; typedef pyField<PSATDTimeStraggeredGrid, PSATDTimeStraggered> pyPSATDTimeStraggeredField; typedef pyField<PSATDTimeStraggeredGrid, PSATDTimeStraggeredPoisson> pyPSATDTimeStraggeredPoissonField; class pySumField : public pyFieldBase { public: pySumField(const std::shared_ptr<pyFieldBase>& pyWrappedField1, const std::shared_ptr<pyFieldBase>& pyWrappedField2) : pyWrappedField1(pyWrappedField1), pyWrappedField2(pyWrappedField2) {} pySumField(const std::shared_ptr<pySumField>& other, const std::shared_ptr<Mapping>& mapping) : 
pyWrappedField1(other->pyWrappedField1->applyMapping(other->pyWrappedField1, mapping)), pyWrappedField2(other->pyWrappedField2->applyMapping(other->pyWrappedField2, mapping)) {} std::shared_ptr<pyFieldBase> applyMapping( const std::shared_ptr<pyFieldBase>& self, const std::shared_ptr<Mapping>& mapping) const override { return std::static_pointer_cast<pyFieldBase>( std::make_shared<pySumField>( std::static_pointer_cast<pySumField>(self), mapping ) ); } FP3 getE(const FP3& coords) const override { return pyWrappedField1->getE(coords) + pyWrappedField2->getE(coords); } FP3 getB(const FP3& coords) const override { return pyWrappedField1->getB(coords) + pyWrappedField2->getB(coords); } FP3 getJ(const FP3& coords) const override { return pyWrappedField1->getJ(coords) + pyWrappedField2->getJ(coords); } void updateFields() override { pyWrappedField1->updateFields(); pyWrappedField2->updateFields(); } void advance(FP dt) override { pyWrappedField1->advance(dt); pyWrappedField2->advance(dt); } private: std::shared_ptr<pyFieldBase> pyWrappedField1; std::shared_ptr<pyFieldBase> pyWrappedField2; }; class pyMulField : public pyFieldBase { public: pyMulField(const std::shared_ptr<pyFieldBase>& pyWrappedField, FP factor) : pyWrappedField(pyWrappedField), factor(factor) {} pyMulField(const std::shared_ptr<pyMulField>& other, const std::shared_ptr<Mapping>& mapping) : pyWrappedField(other->pyWrappedField->applyMapping(other->pyWrappedField, mapping)) {} std::shared_ptr<pyFieldBase> applyMapping( const std::shared_ptr<pyFieldBase>& self, const std::shared_ptr<Mapping>& mapping) const override { return std::static_pointer_cast<pyFieldBase>( std::make_shared<pyMulField>( std::static_pointer_cast<pyMulField>(self), mapping ) ); } FP3 getE(const FP3& coords) const override { return pyWrappedField->getE(coords) * factor; } FP3 getB(const FP3& coords) const override { return pyWrappedField->getB(coords) * factor; } FP3 getJ(const FP3& coords) const override { return 
pyWrappedField->getJ(coords) * factor; } void updateFields() override { pyWrappedField->updateFields(); } void advance(FP dt) override { pyWrappedField->advance(dt); } private: FP factor = 1.0; std::shared_ptr<pyFieldBase> pyWrappedField; }; }
helpme.h
// BEGINLICENSE // // This file is part of helPME, which is distributed under the BSD 3-clause license, // as described in the LICENSE file in the top level directory of this project. // // Author: Andrew C. Simmonett // // ENDLICENSE #ifndef _HELPME_HELPME_H_ #define _HELPME_HELPME_H_ #if __cplusplus || DOXYGEN // C++ header #include <algorithm> #include <array> #include <cmath> #include <complex> #include <functional> #include <iostream> #include <memory> #ifdef _OPENMP #include <omp.h> #endif #include <stdexcept> #include <string> #include <tuple> #include <unistd.h> #include <vector> #include "cartesiantransform.h" #include "fftw_wrapper.h" #include "gamma.h" #include "gridsize.h" #include "matrix.h" #include "memory.h" #if HAVE_MPI == 1 #include "mpi_wrapper.h" #else typedef struct ompi_communicator_t *MPI_Comm; #endif #include "powers.h" #include "splines.h" #include "string_utils.h" /*! * \file helpme.h * \brief Contains the C++ implementation of a PME Instance, and related helper classes. */ namespace helpme { /*! * \brief nCartesian computes the total number of Cartesian components of a given angular momentum. * \param L the angular momentum. * \return total number of components up to and including angular momentum L. */ static int nCartesian(int L) { return (L + 1) * (L + 2) * (L + 3) / 6; } /*! * \brief cartAddress computes the address of a term with given quantum numbers in a Cartesian buffer. * \param lx the x quantum number. * \param ly the y quantum number. * \param lz the z quantum number. * \return the address of an {lx, ly, lz} quantity in a buffer that contains all lower angular momentum terms too. */ static int cartAddress(int lx, int ly, int lz) { int l = lx + ly + lz; return l * (l + 1) * (l + 2) / 6 + lz * (l * 2 - lz + 3) / 2 + ly; } // This is used to define function pointers in the constructor, and makes it easy to add new kernels. 
#define ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(n)                  \
    case n:                                                          \
        convolveEVFxn_ = &convolveEVImpl<n>;                         \
        cacheInfluenceFunctionFxn_ = &cacheInfluenceFunctionImpl<n>; \
        slfEFxn_ = &slfEImpl<n>;                                     \
        dirEFxn_ = &dirEImpl<n>;                                     \
        adjEFxn_ = &adjEImpl<n>;                                     \
        dirEFFxn_ = &dirEFImpl<n>;                                   \
        adjEFFxn_ = &adjEFImpl<n>;                                   \
        break;

/*!
 * \class splineCacheEntry
 * \brief A placeholder to encapsulate information about a given atom's splines
 */
template <typename Real>
struct SplineCacheEntry {
    // The B-Splines for the three lattice directions.
    BSpline<Real> aSpline, bSpline, cSpline;
    // The atom's index in the full (unfiltered) atom list; -1 until assigned.
    int absoluteAtomNumber;
    SplineCacheEntry(int order, int derivativeLevel)
        : aSpline(0, 0, order, derivativeLevel),
          bSpline(0, 0, order, derivativeLevel),
          cSpline(0, 0, order, derivativeLevel),
          absoluteAtomNumber(-1) {}
};

/*!
 * \class PMEInstance
 * \brief A class to encapsulate information related to a particle mesh Ewald calculation.
 *
 * By storing information related to a single PME calculation in this way, we allow multiple
 * instances to be created in calculations requiring multiple PMEs, e.g. for computing both
 * electrostatic and attractive dispersion terms using PME to handle long-range interactions.
 * \tparam Real the floating point type to use for arithmetic.
 */
template <typename Real>
class PMEInstance {
    using GridIterator = std::vector<std::vector<std::pair<short, short>>>;
    using Complex = std::complex<Real>;
    using Spline = BSpline<Real>;
    using RealMat = Matrix<Real>;
    using RealVec = helpme::vector<Real>;

   public:
    /*!
     * \brief The different conventions for orienting a lattice constructed from input parameters.
     */
    enum class LatticeType : int { XAligned = 0, ShapeMatrix = 1 };

    /*!
     * \brief The different conventions for numbering nodes.
     */
    enum class NodeOrder : int { ZYX = 0 };

   protected:
    /// The FFT grid dimensions in the {A,B,C} grid dimensions.
    int dimA_, dimB_, dimC_;
    /// The full A dimension after real->complex transformation.
    int complexDimA_;
    /// The locally owned A dimension after real->complex transformation.
    int myComplexDimA_;
    /// The order of the cardinal B-Spline used for interpolation.
    int splineOrder_;
    /// The actual number of threads per MPI instance, and the number requested previously.
    int nThreads_, requestedNumberOfThreads_;
    /// The exponent of the (inverse) interatomic distance used in this kernel.
    int rPower_;
    /// The scale factor to apply to all energies and derivatives.
    Real scaleFactor_;
    /// The attenuation parameter, whose units should be the inverse of those used to specify coordinates.
    Real kappa_;
    /// The lattice vectors.
    RealMat boxVecs_;
    /// The reciprocal lattice vectors.
    RealMat recVecs_;
    /// The scaled reciprocal lattice vectors, for transforming forces from scaled fractional coordinates.
    RealMat scaledRecVecs_;
    /// An iterator over angular momentum components.
    std::vector<std::array<short, 3>> angMomIterator_;
    /// The number of permutations of each multipole component.
    RealVec permutations_;
    /// From a given starting point on the {A,B,C} edge of the grid, lists all points to be handled, correctly wrapping
    /// around the end.
    GridIterator gridIteratorA_, gridIteratorB_, gridIteratorC_;
    /// The (inverse) bspline moduli to normalize the spreading / probing steps; these are folded into the convolution.
    RealVec splineModA_, splineModB_, splineModC_;
    /// The cached influence function involved in the convolution.
    RealVec cachedInfluenceFunction_;
    /// A function pointer to call the appropriate function to implement convolution with virial, templated to
    /// the rPower value.
    std::function<Real(int, int, int, int, int, int, int, Real, Complex *, const RealMat &, Real, Real, const Real *,
                       const Real *, const Real *, RealMat &, int)>
        convolveEVFxn_;
    /// A function pointer to call the appropriate function to implement caching of the influence function that
    /// appears in the convolution, templated to the rPower value.
    std::function<void(int, int, int, int, int, int, int, Real, RealVec &, const RealMat &, Real, Real, const Real *,
                       const Real *, const Real *, int)>
        cacheInfluenceFunctionFxn_;
    /// A function pointer to call the appropriate function to compute self energy, templated to the rPower value.
    std::function<Real(int, const RealMat &, Real, Real)> slfEFxn_;
    /// A function pointer to call the appropriate function to compute the direct energy, templated to the rPower
    /// value.
    std::function<Real(Real, Real)> dirEFxn_;
    /// A function pointer to call the appropriate function to compute the adjusted energy, templated to the rPower
    /// value.
    std::function<Real(Real, Real)> adjEFxn_;
    /// A function pointer to call the appropriate function to compute the direct energy and force, templated to the
    /// rPower value.
    std::function<std::tuple<Real, Real>(Real, Real, Real)> dirEFFxn_;
    /// A function pointer to call the appropriate function to compute the adjusted energy and force, templated to the
    /// rPower value.
    std::function<std::tuple<Real, Real>(Real, Real, Real)> adjEFFxn_;
#if HAVE_MPI == 1
    /// The communicator object that handles interactions with MPI.
    std::unique_ptr<MPIWrapper<Real>> mpiCommunicator_;
    /// The communicator object that handles interactions with MPI along this nodes {A,B,C} pencils.
    std::unique_ptr<MPIWrapper<Real>> mpiCommunicatorA_, mpiCommunicatorB_, mpiCommunicatorC_;
#endif
    /// The number of nodes in the {A,B,C} dimensions.
    int numNodesA_, numNodesB_, numNodesC_;
    /// The rank of this node along the {A,B,C} dimensions.
    int rankA_, rankB_, rankC_;
    /// The first grid point that this node is responsible for in the {A,B,C} dimensions.
    int firstA_, firstB_, firstC_;
    /// The grid point beyond the last point that this node is responsible for in the {A,B,C} dimensions.
    int lastA_, lastB_, lastC_;
    /// The {X,Y,Z} dimensions of the locally owned chunk of the grid.
    int myDimA_, myDimB_, myDimC_;
    /// The subsets of a given dimension to be processed when doing a transform along another dimension.
    int subsetOfCAlongA_, subsetOfCAlongB_, subsetOfBAlongC_;
    /// The size of a cache line, in units of the size of the Real type, to allow better memory allocation policies.
    Real cacheLineSizeInReals_;
    /// The current unit cell parameters.
    Real cellA_, cellB_, cellC_, cellAlpha_, cellBeta_, cellGamma_;
    /// Whether the unit cell parameters have been changed, invalidating cached gF quantities.
    bool unitCellHasChanged_;
    /// Whether the kappa has been changed, invalidating kappa-dependent quantities.
    bool kappaHasChanged_;
    /// Whether any of the grid dimensions have changed.
    bool gridDimensionHasChanged_;
    /// Whether the spline order has changed.
    bool splineOrderHasChanged_;
    /// Whether the scale factor has changed.
    bool scaleFactorHasChanged_;
    /// Whether the power of R has changed.
    bool rPowerHasChanged_;
    /// Whether the parallel node setup has changed in any way.
    bool numNodesHasChanged_;
    /// The type of alignment scheme used for the lattice vectors.
    LatticeType latticeType_;
    /// Communication buffers for MPI parallelism.
    helpme::vector<Complex> workSpace1_, workSpace2_;
    /// FFTW wrappers to help with transformations in the {A,B,C} dimensions.
    FFTWWrapper<Real> fftHelperA_, fftHelperB_, fftHelperC_;
    /// The list of atoms, and their fractional coordinates, that will contribute to this node.
    std::vector<std::tuple<int, Real, Real, Real>> atomList_;
    /// The cached list of splines, which is stored as a member to make it persistent.
    std::vector<SplineCacheEntry<Real>> splineCache_;

    /*!
     * \brief A simple helper to compute factorials.
     * \param n the number whose factorial is to be taken.
     * \return n!
     */
    unsigned int factorial(unsigned int n) {
        unsigned int ret = 1;
        for (unsigned int i = 1; i <= n; ++i) ret *= i;
        return ret;
    }

    /*!
* \brief makeGridIterator makes an iterator over the spline values that contribute to this node's grid * in a given Cartesian dimension. The iterator is of the form (grid point, spline index) and is * sorted by increasing grid point, for cache efficiency. * \param dimension the dimension of the grid in the Cartesian dimension of interest. * \param first the first grid point in the Cartesian dimension to be handled by this node. * \param last the element past the last grid point in the Cartesian dimension to be handled by this node. * \return the vector of spline iterators for each starting grid point. */ GridIterator makeGridIterator(int dimension, int first, int last) const { GridIterator gridIterator; for (int gridStart = 0; gridStart < dimension; ++gridStart) { std::vector<std::pair<short, short>> splineIterator(splineOrder_); splineIterator.clear(); for (int splineIndex = 0; splineIndex < splineOrder_; ++splineIndex) { int gridPoint = (splineIndex + gridStart) % dimension; if (gridPoint >= first && gridPoint < last) splineIterator.push_back(std::make_pair(gridPoint - first, splineIndex)); } splineIterator.shrink_to_fit(); std::sort(splineIterator.begin(), splineIterator.end()); gridIterator.push_back(splineIterator); } gridIterator.shrink_to_fit(); return gridIterator; } /*! Make sure that the iterator over AM components is up to date. * \param angMom the angular momentum required for the iterator over multipole components. 
*/ void updateAngMomIterator(int parameterAngMom) { auto L = parameterAngMom; size_t expectedNTerms = nCartesian(L); if (angMomIterator_.size() >= expectedNTerms) return; angMomIterator_.resize(expectedNTerms); permutations_.resize(expectedNTerms); for (short l = 0, count = 0; l <= L; ++l) { for (short lz = 0; lz <= l; ++lz) { for (short ly = 0; ly <= l - lz; ++ly) { short lx = l - ly - lz; angMomIterator_[count] = {{static_cast<short>(lx), static_cast<short>(ly), static_cast<short>(lz)}}; permutations_[count] = (Real)factorial(l) / (factorial(lx) * factorial(ly) * factorial(lz)); ++count; } } } } /*! * \brief updateInfluenceFunction builds the gF array cache, if the lattice vector has changed since the last * build of it. If the cell is unchanged, this does nothing. */ void updateInfluenceFunction() { if (unitCellHasChanged_ || kappaHasChanged_ || gridDimensionHasChanged_ || splineOrderHasChanged_ || scaleFactorHasChanged_ || numNodesHasChanged_) { cacheInfluenceFunctionFxn_(dimA_, dimB_, dimC_, myComplexDimA_, myDimB_ / numNodesC_, rankA_ * myComplexDimA_, rankB_ * myDimB_ + rankC_ * myDimB_ / numNodesC_, scaleFactor_, cachedInfluenceFunction_, recVecs_, cellVolume(), kappa_, &splineModA_[0], &splineModB_[0], &splineModC_[0], nThreads_); } } /*! * \brief filterAtomsAndBuildSplineCache builds a list of BSplines for only the atoms to be handled by this node. * \param splineDerivativeLevel the derivative level (parameter angular momentum + energy derivative level) of the * BSplines. \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. 
*/ void filterAtomsAndBuildSplineCache(int splineDerivativeLevel, const RealMat &coords) { assertInitialized(); atomList_.clear(); size_t nAtoms = coords.nRows(); for (int atom = 0; atom < nAtoms; ++atom) { const Real *atomCoords = coords[atom]; constexpr float EPS = 1e-6; Real aCoord = atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) + atomCoords[2] * recVecs_(2, 0) - EPS; Real bCoord = atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) + atomCoords[2] * recVecs_(2, 1) - EPS; Real cCoord = atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) + atomCoords[2] * recVecs_(2, 2) - EPS; // Make sure the fractional coordinates fall in the range 0 <= s < 1 aCoord -= floor(aCoord); bCoord -= floor(bCoord); cCoord -= floor(cCoord); short aStartingGridPoint = dimA_ * aCoord; short bStartingGridPoint = dimB_ * bCoord; short cStartingGridPoint = dimC_ * cCoord; const auto &aGridIterator = gridIteratorA_[aStartingGridPoint]; const auto &bGridIterator = gridIteratorB_[bStartingGridPoint]; const auto &cGridIterator = gridIteratorC_[cStartingGridPoint]; if (aGridIterator.size() && bGridIterator.size() && cGridIterator.size()) atomList_.emplace_back(atom, aCoord, bCoord, cCoord); } // Now we know how many atoms we loop over the dense list, redefining nAtoms accordingly. // The first stage above is to get the number of atoms, so we can avoid calling push_back // and thus avoid the many memory allocations. If the cache is too small, grow it by a // certain scale factor to try and minimize allocations in a not-too-wasteful manner. 
nAtoms = atomList_.size(); if (splineCache_.size() < nAtoms) { size_t newSize = static_cast<size_t>(1.2 * nAtoms); for (int atom = splineCache_.size(); atom < newSize; ++atom) splineCache_.emplace_back(splineOrder_, splineDerivativeLevel); } for (int atomListNum = 0; atomListNum < nAtoms; ++atomListNum) { const auto &entry = atomList_[atomListNum]; const int absoluteAtomNumber = std::get<0>(entry); const Real aCoord = std::get<1>(entry); const Real bCoord = std::get<2>(entry); const Real cCoord = std::get<3>(entry); short aStartingGridPoint = dimA_ * aCoord; short bStartingGridPoint = dimB_ * bCoord; short cStartingGridPoint = dimC_ * cCoord; auto &atomSplines = splineCache_[atomListNum]; atomSplines.absoluteAtomNumber = absoluteAtomNumber; atomSplines.aSpline.update(aStartingGridPoint, dimA_ * aCoord - aStartingGridPoint, splineOrder_, splineDerivativeLevel); atomSplines.bSpline.update(bStartingGridPoint, dimB_ * bCoord - bStartingGridPoint, splineOrder_, splineDerivativeLevel); atomSplines.cSpline.update(cStartingGridPoint, dimC_ * cCoord - cStartingGridPoint, splineOrder_, splineDerivativeLevel); } } /*! * \brief Spreads parameters onto the grid for a single atom * \param atom the absolute atom number. * \param realGrid pointer to the array containing the grid in CBA order * \param nComponents the number of angular momentum components in the parameters. * \param nForceComponents the number of angular momentum components in the parameters with one extra * level of angular momentum to permit evaluation of forces. * \param splineA the BSpline object for the A direction. * \param splineB the BSpline object for the B direction. * \param splineC the BSpline object for the C direction. * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, * etc...). 
     *        For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL =
     *        (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
     *
     *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
     *
     *        i.e. generated by the python loops
     *        \code{.py}
     *        for L in range(maxAM+1):
     *            for Lz in range(0,L+1):
     *                for Ly in range(0, L - Lz + 1):
     *                    Lx = L - Ly - Lz
     *        \endcode
     */
    void spreadParametersImpl(const int &atom, Real *realGrid, const int &nComponents, const Spline &splineA,
                              const Spline &splineB, const Spline &splineC, const RealMat &parameters) {
        const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
        const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
        const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
        // Unpack the iterators to raw pointers and explicit counts; this is a hot loop.
        int numPointsA = static_cast<int>(aGridIterator.size());
        int numPointsB = static_cast<int>(bGridIterator.size());
        int numPointsC = static_cast<int>(cGridIterator.size());
        const auto *iteratorDataA = aGridIterator.data();
        const auto *iteratorDataB = bGridIterator.data();
        const auto *iteratorDataC = cGridIterator.data();
        for (int component = 0; component < nComponents; ++component) {
            const auto &quanta = angMomIterator_[component];
            Real param = parameters(atom, component);
            // Spline tables for this component's {lx, ly, lz} derivative levels.
            const Real *splineValsA = splineA[quanta[0]];
            const Real *splineValsB = splineB[quanta[1]];
            const Real *splineValsC = splineC[quanta[2]];
            for (int pointC = 0; pointC < numPointsC; ++pointC) {
                const auto &cPoint = iteratorDataC[pointC];
                Real cValP = param * splineValsC[cPoint.second];
                for (int pointB = 0; pointB < numPointsB; ++pointB) {
                    const auto &bPoint = iteratorDataB[pointB];
                    Real cbValP = cValP * splineValsB[bPoint.second];
                    // cbRow addresses the (c, b) row of the CBA-ordered grid.
                    Real *cbRow = realGrid + cPoint.first * myDimB_ * myDimA_ + bPoint.first * myDimA_;
                    for (int pointA = 0; pointA < numPointsA; ++pointA) {
                        const auto &aPoint = iteratorDataA[pointA];
                        cbRow[aPoint.first] += cbValP * splineValsA[aPoint.second];
                    }
                }
            }
        }
    }

    /*!
     * \brief Probes the grid and computes the force for a single atom, specialized for zero parameter angular
     *        momentum.
     * \param potentialGrid pointer to the array containing the potential, in ZYX order.
     * \param splineA the BSpline object for the A direction.
     * \param splineB the BSpline object for the B direction.
     * \param splineC the BSpline object for the C direction.
     * \param parameter the list of parameter associated with the given atom.
     * \param forces a 3 vector of the forces for this atom, ordered in memory as {Fx, Fy, Fz}.
     */
    void probeGridImpl(const Real *potentialGrid, const Spline &splineA, const Spline &splineB, const Spline &splineC,
                       const Real &parameter, Real *forces) const {
        const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
        const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
        const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
        // We unpack the vector to raw pointers, as profiling shows that using range based for loops over vectors
        // causes a significant penalty in the innermost loop, primarily due to checking the loop stop condition.
        int numPointsA = static_cast<int>(aGridIterator.size());
        int numPointsB = static_cast<int>(bGridIterator.size());
        int numPointsC = static_cast<int>(cGridIterator.size());
        const auto *iteratorDataA = aGridIterator.data();
        const auto *iteratorDataB = bGridIterator.data();
        const auto *iteratorDataC = cGridIterator.data();
        // Zeroth-derivative spline tables; the first-derivative values are read at an offset of
        // splineOrder_ from the start of each table.
        const Real *splineStartA0 = splineA[0];
        const Real *splineStartB0 = splineB[0];
        const Real *splineStartC0 = splineC[0];
        const Real *splineStartA1 = splineStartA0 + splineOrder_;
        const Real *splineStartB1 = splineStartB0 + splineOrder_;
        const Real *splineStartC1 = splineStartC0 + splineOrder_;
        // Accumulate the fractional-coordinate field components.
        Real Ex = 0, Ey = 0, Ez = 0;
        for (int pointC = 0; pointC < numPointsC; ++pointC) {
            const auto &cPoint = iteratorDataC[pointC];
            const Real &splineC0 = splineStartC0[cPoint.second];
            const Real &splineC1 = splineStartC1[cPoint.second];
            for (int pointB = 0; pointB < numPointsB; ++pointB) {
                const auto &bPoint = iteratorDataB[pointB];
                const Real &splineB0 = splineStartB0[bPoint.second];
                const Real &splineB1 = splineStartB1[bPoint.second];
                const Real *cbRow = potentialGrid + cPoint.first * myDimA_ * myDimB_ + bPoint.first * myDimA_;
                for (int pointA = 0; pointA < numPointsA; ++pointA) {
                    const auto &aPoint = iteratorDataA[pointA];
                    const Real &splineA0 = splineStartA0[aPoint.second];
                    const Real &splineA1 = splineStartA1[aPoint.second];
                    const Real &gridVal = cbRow[aPoint.first];
                    // The differentiated spline goes in the direction being probed.
                    Ey += gridVal * splineA0 * splineB1 * splineC0;
                    Ez += gridVal * splineA0 * splineB0 * splineC1;
                    Ex += gridVal * splineA1 * splineB0 * splineC0;
                }
            }
        }
        // Transform the fractional components to Cartesian forces.
        forces[0] -= parameter * (scaledRecVecs_[0][0] * Ex + scaledRecVecs_[0][1] * Ey + scaledRecVecs_[0][2] * Ez);
        forces[1] -= parameter * (scaledRecVecs_[1][0] * Ex + scaledRecVecs_[1][1] * Ey + scaledRecVecs_[1][2] * Ez);
        forces[2] -= parameter * (scaledRecVecs_[2][0] * Ex + scaledRecVecs_[2][1] * Ey + scaledRecVecs_[2][2] * Ez);
    }

    /*!
     * \brief Probes the grid and accumulates the fractional potential components at a single atom's
     *        location, for arbitrary parameter angular momentum.  (This overload only fills phiPtr;
     *        force assembly is done by the caller.)
     * \param potentialGrid pointer to the array containing the potential, in ZYX order.
     * \param nPotentialComponents the number of components in the potential and its derivatives with one extra
     *        level of angular momentum to permit evaluation of forces.
     * \param splineA the BSpline object for the A direction.
     * \param splineB the BSpline object for the B direction.
     * \param splineC the BSpline object for the C direction.
     * \param phiPtr a scratch array of length nPotentialComponents, to store the fractional potential.
     *        N.B. Make sure that updateAngMomIterator() has been called first with the appropriate derivative
     *        level for the requested potential derivatives.
     */
    void probeGridImpl(const Real *potentialGrid, const int &nPotentialComponents, const Spline &splineA,
                       const Spline &splineB, const Spline &splineC, Real *phiPtr) {
        const auto &aGridIterator = gridIteratorA_[splineA.startingGridPoint()];
        const auto &bGridIterator = gridIteratorB_[splineB.startingGridPoint()];
        const auto &cGridIterator = gridIteratorC_[splineC.startingGridPoint()];
        const Real *splineStartA = splineA[0];
        const Real *splineStartB = splineB[0];
        const Real *splineStartC = splineC[0];
        for (const auto &cPoint : cGridIterator) {
            for (const auto &bPoint : bGridIterator) {
                const Real *cbRow = potentialGrid + cPoint.first * myDimA_ * myDimB_ + bPoint.first * myDimA_;
                for (const auto &aPoint : aGridIterator) {
                    Real gridVal = cbRow[aPoint.first];
                    for (int component = 0; component < nPotentialComponents; ++component) {
                        const auto &quanta = angMomIterator_[component];
                        // Each derivative level's spline table is splineOrder_ entries long.
                        const Real *splineValsA = splineStartA + quanta[0] * splineOrder_;
                        const Real *splineValsB = splineStartB + quanta[1] * splineOrder_;
                        const Real *splineValsC = splineStartC + quanta[2] * splineOrder_;
                        phiPtr[component] += gridVal * splineValsA[aPoint.second] * splineValsB[bPoint.second] *
                                             splineValsC[cPoint.second];
                    }
                }
            }
        }
    }

    /*!
     * \brief Probes the grid and computes the force for a single atom, for arbitrary parameter angular momentum.
     * \param atom the absolute atom number.
     * \param potentialGrid pointer to the array containing the potential, in ZYX order.
     * \param nComponents the number of angular momentum components in the parameters.
     * \param nForceComponents the number of angular momentum components in the parameters with one extra
     *        level of angular momentum to permit evaluation of forces.
     * \param splineA the BSpline object for the A direction.
     * \param splineB the BSpline object for the B direction.
     * \param splineC the BSpline object for the C direction.
     * \param phiPtr a scratch array of length nForceComponents, to store the fractional potential.
     * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles,
     *        etc...).  For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected,
     *        where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
     *
     *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
     *
     *        i.e. generated by the python loops
     *        \code{.py}
     *        for L in range(maxAM+1):
     *            for Lz in range(0,L+1):
     *                for Ly in range(0, L - Lz + 1):
     *                    Lx = L - Ly - Lz
     *        \endcode
     * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
     */
    void probeGridImpl(const int &atom, const Real *potentialGrid, const int &nComponents, const int &nForceComponents,
                       const Spline &splineA, const Spline &splineB, const Spline &splineC, Real *phiPtr,
                       const RealMat &parameters, Real *forces) {
        // Gather the fractional potential and its derivatives at this atom's location.
        std::fill(phiPtr, phiPtr + nForceComponents, 0);
        probeGridImpl(potentialGrid, nForceComponents, splineA, splineB, splineC, phiPtr);

        // Contract the parameters with the next-higher derivative components to form fractional
        // forces, then transform those to Cartesian via the scaled reciprocal lattice vectors.
        Real fracForce[3] = {0, 0, 0};
        for (int component = 0; component < nComponents; ++component) {
            Real param = parameters(atom, component);
            const auto &quanta = angMomIterator_[component];
            short lx = quanta[0];
            short ly = quanta[1];
            short lz = quanta[2];
            fracForce[0] -= param * phiPtr[cartAddress(lx + 1, ly, lz)];
            fracForce[1] -= param * phiPtr[cartAddress(lx, ly + 1, lz)];
            fracForce[2] -= param * phiPtr[cartAddress(lx, ly, lz + 1)];
        }
        forces[0] += scaledRecVecs_[0][0] * fracForce[0] + scaledRecVecs_[0][1] * fracForce[1] +
                     scaledRecVecs_[0][2] * fracForce[2];
        forces[1] += scaledRecVecs_[1][0] * fracForce[0] + scaledRecVecs_[1][1] * fracForce[1] +
                     scaledRecVecs_[1][2] * fracForce[2];
        forces[2] += scaledRecVecs_[2][0] * fracForce[0] + scaledRecVecs_[2][1] * fracForce[1] +
                     scaledRecVecs_[2][2] * fracForce[2];
    }

    /*!
     * \brief assertInitialized makes sure that setup() has been called before running any calculations.
     */
    void assertInitialized() const {
        // rPower_ is only nonzero after a successful setup call.
        if (!rPower_)
            throw std::runtime_error(
                "Either setup(...) or setup_parallel(...) must be called before computing anything.");
    }

    /*!
     * \brief makeBSplines construct the {x,y,z} B-Splines.
     * \param atomCoords a 3-vector containing the atom's coordinates.
     * \param derivativeLevel level of derivative needed for the splines.
     * \return a 3-tuple containing the {x,y,z} B-splines.
     */
    std::tuple<Spline, Spline, Spline> makeBSplines(const Real *atomCoords, short derivativeLevel) const {
        // Subtract a tiny amount to make sure we're not exactly on the rightmost (excluded)
        // grid point. The calculation is translationally invariant, so this is valid.
        constexpr float EPS = 1e-6f;
        Real aCoord =
            atomCoords[0] * recVecs_(0, 0) + atomCoords[1] * recVecs_(1, 0) + atomCoords[2] * recVecs_(2, 0) - EPS;
        Real bCoord =
            atomCoords[0] * recVecs_(0, 1) + atomCoords[1] * recVecs_(1, 1) + atomCoords[2] * recVecs_(2, 1) - EPS;
        Real cCoord =
            atomCoords[0] * recVecs_(0, 2) + atomCoords[1] * recVecs_(1, 2) + atomCoords[2] * recVecs_(2, 2) - EPS;
        // Make sure the fractional coordinates fall in the range 0 <= s < 1
        aCoord -= floor(aCoord);
        bCoord -= floor(bCoord);
        cCoord -= floor(cCoord);
        short aStartingGridPoint = dimA_ * aCoord;
        short bStartingGridPoint = dimB_ * bCoord;
        short cStartingGridPoint = dimC_ * cCoord;
        Real aDistanceFromGridPoint = dimA_ * aCoord - aStartingGridPoint;
        Real bDistanceFromGridPoint = dimB_ * bCoord - bStartingGridPoint;
        Real cDistanceFromGridPoint = dimC_ * cCoord - cStartingGridPoint;
        return std::make_tuple(Spline(aStartingGridPoint, aDistanceFromGridPoint, splineOrder_, derivativeLevel),
                               Spline(bStartingGridPoint, bDistanceFromGridPoint, splineOrder_, derivativeLevel),
                               Spline(cStartingGridPoint, cDistanceFromGridPoint, splineOrder_, derivativeLevel));
    }

    /*!
     * \brief sanityChecks just makes sure that inputs have consistent dimensions, and that prerequisites are
     *        initialized.
     * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
     *        quadrupoles, etc.).
     * \param parameters the input parameters.
     * \param coordinates the input coordinates.
     */
    void sanityChecks(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) {
        assertInitialized();

        if (parameters.nRows() == 0)
            throw std::runtime_error("Parameters have not been set yet! Call setParameters(...) before runPME(...);");
        if (coordinates.nRows() == 0)
            throw std::runtime_error(
                "Coordinates have not been set yet! Call setCoordinates(...) before runPME(...);");
        if (boxVecs_.isNearZero())
            throw std::runtime_error(
                "Lattice vectors have not been set yet! Call setLatticeVectors(...) before runPME(...);");
        if (coordinates.nRows() != parameters.nRows())
            throw std::runtime_error(
                "Inconsistent number of coordinates and parameters; there should be nAtoms of each.");
        if (parameters.nCols() != nCartesian(parameterAngMom))
            throw std::runtime_error(
                "Mismatch in the number of parameters provided and the parameter angular momentum");
    }

    /*!
     * \brief convolveEVImpl performs the reciprocal space convolution, returning the energy.  We opt to not cache
     *        this the same way as the non-virial version because it's safe to assume that if the virial is requested
     *        the box is likely to change, which renders the cache useless.
     * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
     * \param nx the grid dimension in the x direction.
     * \param ny the grid dimension in the y direction.
     * \param nz the grid dimension in the z direction.
     * \param myNx the subset of the grid in the x direction to be handled by this node.
     * \param myNy the subset of the grid in the y direction to be handled by this node.
     * \param startX the starting grid point handled by this node in the X direction.
     * \param startY the starting grid point handled by this node in the Y direction.
     * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
     *        1 / [4 pi epsilon0] for Coulomb calculations).
     * \param gridPtr the Fourier space grid, with ordering YXZ.
     * \param boxInv the reciprocal lattice vectors.
     * \param volume the volume of the unit cell.
     * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
     * \param xMods the Fourier space norms of the x B-Splines.
     * \param yMods the Fourier space norms of the y B-Splines.
     * \param zMods the Fourier space norms of the z B-Splines.
     * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
     *        This vector is incremented, not assigned.
     * \param nThreads the number of OpenMP threads to use.
     * \return the reciprocal space energy.
     */
    template <int rPower>
    static Real convolveEVImpl(int nx, int ny, int nz, int myNx, int myNy, int startX, int startY, Real scaleFactor,
                               Complex *gridPtr, const RealMat &boxInv, Real volume, Real kappa, const Real *xMods,
                               const Real *yMods, const Real *zMods, RealMat &virial, int nThreads) {
        Real energy = 0;

        bool nodeZero = startX == 0 && startY == 0;
        if (rPower > 3 && nodeZero) {
            // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
            // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
            Real prefac = 2 * scaleFactor * M_PI * sqrtPi * pow(kappa, rPower - 3) /
                          ((rPower - 3) * gammaComputer<Real, rPower>::value * volume);
            energy += prefac * std::norm(gridPtr[0]);
        }
        // Ensure the m=0 term convolution product is zeroed for the backtransform; it's been accounted for above.
        if (nodeZero) gridPtr[0] = Complex(0, 0);

        std::vector<Real> xMVals(myNx), yMVals(myNy), zMVals(nz);
        // Iterators to conveniently map {X,Y,Z} grid location to m_{X,Y,Z} value, where -1/2 << m/dim < 1/2.
        for (int kx = 0; kx < myNx; ++kx) xMVals[kx] = startX + (kx + startX >= (nx + 1) / 2 ? kx - nx : kx);
        for (int ky = 0; ky < myNy; ++ky) yMVals[ky] = startY + (ky + startY >= (ny + 1) / 2 ? ky - ny : ky);
        for (int kz = 0; kz < nz; ++kz) zMVals[kz] = kz >= (nz + 1) / 2 ? kz - nz : kz;

        Real bPrefac = M_PI * M_PI / (kappa * kappa);
        Real volPrefac = scaleFactor * pow(M_PI, rPower - 1) / (sqrtPi * gammaComputer<Real, rPower>::value * volume);
        int halfNx = nx / 2 + 1;
        size_t nxz = myNx * nz;
        Real Vxx = 0, Vxy = 0, Vyy = 0, Vxz = 0, Vyz = 0, Vzz = 0;
        const Real *boxPtr = boxInv[0];
        const Real *xMPtr = xMVals.data();
        const Real *yMPtr = yMVals.data();
        const Real *zMPtr = zMVals.data();
        size_t nyxz = myNy * nxz;
        // Exclude m=0 cell.
        int start = (nodeZero ? 1 : 0);
        // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for reduction(+ : energy, Vxx, Vxy, Vyy, Vxz, Vyz, Vzz) num_threads(nThreads)
        for (size_t yxz = start; yxz < nyxz; ++yxz) {
            // Decompose the flat YXZ index back into (ky, kx, kz).
            size_t xz = yxz % nxz;
            short ky = yxz / nxz;
            short kx = xz / nz;
            short kz = xz % nz;
            // We only loop over the first nx/2+1 x values; this
            // accounts for the "missing" complex conjugate values.
            Real permPrefac = kx + startX != 0 && kx + startX != halfNx - 1 ? 2 : 1;
            const Real &mx = xMPtr[kx];
            const Real &my = yMPtr[ky];
            const Real &mz = zMPtr[kz];
            // Reciprocal-space vector for this grid point, and its squared norm.
            Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz;
            Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz;
            Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz;
            Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ;
            Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq);
            Real bSquared = bPrefac * mNormSq;
            // eGamma feeds the energy/influence function; vGamma feeds the virial.
            auto gammas = incompleteGammaVirialComputer<Real, 3 - rPower>::compute(bSquared);
            Real eGamma = std::get<0>(gammas);
            Real vGamma = std::get<1>(gammas);
            Complex &gridVal = gridPtr[yxz];
            Real structFacNorm = std::norm(gridVal);
            Real totalPrefac = volPrefac * mTerm * yMods[ky + startY] * xMods[kx + startX] * zMods[kz];
            Real influenceFunction = totalPrefac * eGamma;
            // Apply the influence function in place, ready for the back transform.
            gridVal *= influenceFunction;
            Real eTerm = permPrefac * influenceFunction * structFacNorm;
            Real vTerm = permPrefac * vGamma * totalPrefac / mNormSq * structFacNorm;
            energy += eTerm;
            Vxx += vTerm * mVecX * mVecX;
            Vxy += vTerm * mVecX * mVecY;
            Vyy += vTerm * mVecY * mVecY;
            Vxz += vTerm * mVecX * mVecZ;
            Vyz += vTerm * mVecY * mVecZ;
            Vzz += vTerm * mVecZ * mVecZ;
        }

        // Halve to correct for double counting of interactions.
        energy /= 2;

        // Accumulate into the caller's virial, ordered XX XY YY XZ YZ ZZ; the diagonal terms
        // carry the energy contribution.
        virial[0][0] -= Vxx - energy;
        virial[0][1] -= Vxy;
        virial[0][2] -= Vyy - energy;
        virial[0][3] -= Vxz;
        virial[0][4] -= Vyz;
        virial[0][5] -= Vzz - energy;

        return energy;
    }

    /*!
     * \brief cacheInfluenceFunctionImpl computes the influence function used in convolution, for later use.
 * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
 * \param nx the grid dimension in the x direction.
 * \param ny the grid dimension in the y direction.
 * \param nz the grid dimension in the z direction.
 * \param myNx the subset of the grid in the x direction to be handled by this node.
 * \param myNy the subset of the grid in the y direction to be handled by this node.
 * \param startX the starting grid point handled by this node in the X direction.
 * \param startY the starting grid point handled by this node in the Y direction.
 * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
 *        1 / [4 pi epsilon0] for Coulomb calculations).
 * \param influenceFunction the vector that receives the computed influence function, with ordering YXZ.
 * \param boxInv the reciprocal lattice vectors.
 * \param volume the volume of the unit cell.
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param xMods the Fourier space norms of the x B-Splines.
 * \param yMods the Fourier space norms of the y B-Splines.
 * \param zMods the Fourier space norms of the z B-Splines.
 * \param nThreads the number of OpenMP threads to use.
 */
template <int rPower>
static void cacheInfluenceFunctionImpl(int nx, int ny, int nz, int myNx, int myNy, int startX, int startY,
                                       Real scaleFactor, RealVec &influenceFunction, const RealMat &boxInv,
                                       Real volume, Real kappa, const Real *xMods, const Real *yMods,
                                       const Real *zMods, int nThreads) {
    bool nodeZero = startX == 0 && startY == 0;
    size_t nxz = myNx * nz;
    size_t nyxz = myNy * nxz;
    influenceFunction.resize(nyxz);
    Real *gridPtr = influenceFunction.data();
    // The m=0 term is handled separately (see convolveE); give it zero influence so the loop below can skip it.
    if (nodeZero) gridPtr[0] = 0;

    std::vector<Real> xMVals(myNx), yMVals(myNy), zMVals(nz);
    // Iterators to conveniently map {X,Y,Z} grid location to m_{X,Y,Z} value, where -1/2 << m/dim < 1/2.
    for (int kx = 0; kx < myNx; ++kx) xMVals[kx] = startX + (kx + startX >= (nx + 1) / 2 ? kx - nx : kx);
    for (int ky = 0; ky < myNy; ++ky) yMVals[ky] = startY + (ky + startY >= (ny + 1) / 2 ? ky - ny : ky);
    for (int kz = 0; kz < nz; ++kz) zMVals[kz] = kz >= (nz + 1) / 2 ? kz - nz : kz;

    Real bPrefac = M_PI * M_PI / (kappa * kappa);
    Real volPrefac = scaleFactor * pow(M_PI, rPower - 1) / (sqrtPi * gammaComputer<Real, rPower>::value * volume);
    const Real *boxPtr = boxInv[0];
    // Exclude m=0 cell.
    int start = (nodeZero ? 1 : 0);
    // Writing the three nested loops in one allows for better load balancing in parallel.
#pragma omp parallel for num_threads(nThreads)
    for (size_t yxz = start; yxz < nyxz; ++yxz) {
        size_t xz = yxz % nxz;
        // NOTE(review): ky/kx/kz are held in short; assumes per-node grid dimensions < 32768 -- confirm.
        short ky = yxz / nxz;
        short kx = xz / nz;
        short kz = xz % nz;
        Real mx = (Real)xMVals[kx];
        Real my = (Real)yMVals[ky];
        Real mz = (Real)zMVals[kz];
        // Reciprocal-space vector m = boxInv^T . (mx, my, mz).
        Real mVecX = boxPtr[0] * mx + boxPtr[1] * my + boxPtr[2] * mz;
        Real mVecY = boxPtr[3] * mx + boxPtr[4] * my + boxPtr[5] * mz;
        Real mVecZ = boxPtr[6] * mx + boxPtr[7] * my + boxPtr[8] * mz;
        Real mNormSq = mVecX * mVecX + mVecY * mVecY + mVecZ * mVecZ;
        Real mTerm = raiseNormToIntegerPower<Real, rPower - 3>::compute(mNormSq);
        Real bSquared = bPrefac * mNormSq;
        Real incompleteGammaTerm = incompleteGammaComputer<Real, 3 - rPower>::compute(bSquared);
        gridPtr[yxz] =
            volPrefac * incompleteGammaTerm * mTerm * yMods[ky + startY] * xMods[kx + startX] * zMods[kz];
    }
}

/*!
 * \brief dirEImpl computes the kernel for the direct energy for a pair.
 * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
 * \param rSquared the square of the internuclear distance
 * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
 * \return the energy kernel.
 */
template <int rPower>
inline static Real dirEImpl(Real rSquared, Real kappaSquared) {
    Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
    Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
                     gammaComputer<Real, rPower>::value;
    return gammaTerm / denominator;
}

/*!
 * \brief dirEFImpl computes the kernels for the direct energy and force for a pair.
 * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
 * \param rSquared the square of the internuclear distance
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
 * \return a tuple containing the energy and force kernels, respectively.
 */
template <int rPower>
inline static std::tuple<Real, Real> dirEFImpl(Real rSquared, Real kappa, Real kappaSquared) {
    // N.B. despite the name, rInv holds 1/r^2: the argument is the squared distance.
    Real rInv = 1 / rSquared;
    // kappa^rPower, built by repeated multiplication (rPower is a small compile-time constant).
    Real kappaToRPower = kappa;
    for (int i = 1; i < rPower; ++i) kappaToRPower *= kappa;
    Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
    Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
                     gammaComputer<Real, rPower>::value;
    Real eKernel = gammaTerm / denominator;
    Real fKernel = -rPower * eKernel * rInv -
                   2 * rInv * exp(-kappaSquared * rSquared) * kappaToRPower / gammaComputer<Real, rPower>::value;
    return std::make_tuple(eKernel, fKernel);
}

/*!
 * \brief adjEImpl computes the kernel for the adjusted energy for a pair.
 * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
 * \param rSquared the square of the internuclear distance
 * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
 * \return the energy kernel.
 */
template <int rPower>
inline static Real adjEImpl(Real rSquared, Real kappaSquared) {
    Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
    // (gammaTerm - 1): the adjusted kernel removes the full 1/r^rPower interaction,
    // leaving only the attenuated part.
    Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
                     gammaComputer<Real, rPower>::value;
    return (gammaTerm - 1) / denominator;
}

/*!
 * \brief adjEFImpl computes the kernels for the adjusted energy and force for a pair.
 * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
 * \param rSquared the square of the internuclear distance
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param kappaSquared the square of attenuation parameter in units inverse of those used to specify coordinates.
 * \return a tuple containing the energy and force kernels, respectively.
 */
template <int rPower>
inline static std::tuple<Real, Real> adjEFImpl(Real rSquared, Real kappa, Real kappaSquared) {
    // N.B. despite the name, rInv holds 1/r^2: the argument is the squared distance.
    Real rInv = 1 / rSquared;
    Real kappaToRPower = kappa;
    for (int i = 1; i < rPower; ++i) kappaToRPower *= kappa;
    Real denominator = raiseNormToIntegerPower<Real, rPower>::compute(rSquared);
    Real gammaTerm = incompleteGammaComputer<Real, rPower>::compute(rSquared * kappaSquared) /
                     gammaComputer<Real, rPower>::value;
    Real eKernel = (gammaTerm - 1) / denominator;
    Real fKernel = -rPower * eKernel * rInv -
                   2 * rInv * exp(-kappaSquared * rSquared) * kappaToRPower / gammaComputer<Real, rPower>::value;
    return std::make_tuple(eKernel, fKernel);
}

/*!
 * \brief slfEImpl computes the self energy due to particles feeling their own potential.
 * \tparam rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive dispersion).
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles,
 *        etc.).
 * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles,
 *        etc...).  For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL =
 *        (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
 *
 *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
 *
 *        i.e. generated by the python loops
 *        \code{.py}
 *        for L in range(maxAM+1):
 *            for Lz in range(0,L+1):
 *                for Ly in range(0, L - Lz + 1):
 *                    Lx = L - Ly - Lz
 *        \endcode
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof
 *        (e.g. the 1 / [4 pi epslion0] for Coulomb calculations).
 * \return the self energy.  N.B. there is no self force associated with this term.
 */
template <int rPower>
static Real slfEImpl(int parameterAngMom, const RealMat &parameters, Real kappa, Real scaleFactor) {
    // Only the monopole (charge-like) self term is implemented.
    if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet.");

    size_t nAtoms = parameters.nRows();
    // Negative prefactor: the self energy is a correction subtracted from the total.
    Real prefac = -scaleFactor * std::pow(kappa, rPower) / (rPower * gammaComputer<Real, rPower>::value);
    // Sum of squared parameters, i.e. sum_i c(i)^2.
    Real sumCoefs = 0;
    for (size_t atom = 0; atom < nAtoms; ++atom) {
        sumCoefs += parameters(atom, 0) * parameters(atom, 0);
    }
    return prefac * sumCoefs;
}

/*!
 * \brief common_init sets up information that is common to serial and parallel runs.
 */
void common_init(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor,
                 int nThreads) {
    // Record which inputs changed so dependent quantities can be selectively rebuilt by callers.
    kappaHasChanged_ = kappa != kappa_;
    rPowerHasChanged_ = rPower_ != rPower;
    gridDimensionHasChanged_ = dimA_ != dimA || dimB_ != dimB || dimC_ != dimC;
    splineOrderHasChanged_ = splineOrder_ != splineOrder;
    scaleFactorHasChanged_ = scaleFactor_ != scaleFactor;
    // Rebuild the (expensive) FFT helpers, iterators and spline moduli only if something actually changed.
    if (kappaHasChanged_ || rPowerHasChanged_ || gridDimensionHasChanged_ || splineOrderHasChanged_ ||
        scaleFactorHasChanged_ || requestedNumberOfThreads_ != nThreads) {
        rPower_ = rPower;

        dimA_ = dimA;
        dimB_ = dimB;
        dimC_ = dimC;
        // Complex grid is half-size (+1) along A, per the real-to-complex transform layout.
        complexDimA_ = dimA / 2 + 1;
        // NOTE(review): uses myDimA_, which must already have been set by the node-decomposition setup -- confirm.
        myComplexDimA_ = myDimA_ / 2 + 1;
        splineOrder_ = splineOrder;
        requestedNumberOfThreads_ = nThreads;
#ifdef _OPENMP
        // nThreads == 0 means "use all available threads".
        nThreads_ = nThreads ? nThreads : omp_get_max_threads();
#else
        nThreads_ = 1;
#endif
        scaleFactor_ = scaleFactor;
        kappa_ = kappa;
        cacheLineSizeInReals_ = static_cast<Real>(sysconf(_SC_PAGESIZE) / sizeof(Real));

        // Helpers to perform 1D FFTs along each dimension.
        fftHelperA_ = FFTWWrapper<Real>(dimA_);
        fftHelperB_ = FFTWWrapper<Real>(dimB_);
        fftHelperC_ = FFTWWrapper<Real>(dimC_);

        // Grid iterators to correctly wrap the grid when using splines.
        gridIteratorA_ = makeGridIterator(dimA_, firstA_, lastA_);
        gridIteratorB_ = makeGridIterator(dimB_, firstB_, lastB_);
        gridIteratorC_ = makeGridIterator(dimC_, firstC_, lastC_);

        // Fourier space spline norms.
        Spline spline = Spline(0, 0, splineOrder_, 0);
        splineModA_ = spline.invSplineModuli(dimA_);
        splineModB_ = spline.invSplineModuli(dimB_);
        splineModC_ = spline.invSplineModuli(dimC_);

        // Set up function pointers by instantiating the appropriate evaluation functions. We could add many more
        // entries by default here, but don't right now to avoid code bloat. To add an extra rPower kernel is a
        // trivial cut and paste exercise; just add a new line with the desired 1/R power as the macro's argument.
        switch (rPower) {
            ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(1);
            ENABLE_KERNEL_WITH_INVERSE_R_EXPONENT_OF(6);
            default:
                std::string msg("Bad rPower requested. To fix this, add the appropriate entry in");
                msg += __FILE__;
                msg += ", line number ";
                // The "- 5" points the reported line back at the switch statement above.
                msg += std::to_string(__LINE__ - 5);
                throw std::runtime_error(msg.c_str());
                break;
        }

        // Per-node slab sizes used by the parallel transpose steps of the 3D FFT.
        subsetOfCAlongA_ = myDimC_ / numNodesA_;
        subsetOfCAlongB_ = myDimC_ / numNodesB_;
        subsetOfBAlongC_ = myDimB_ / numNodesC_;

        // Two scratch grids, ping-ponged between throughout the forward/inverse transforms.
        workSpace1_ = helpme::vector<Complex>(myDimC_ * myComplexDimA_ * myDimB_);
        workSpace2_ = helpme::vector<Complex>(myDimC_ * myComplexDimA_ * myDimB_);
    }
}

public:
// Default constructor: zero-initializes all lattice/grid bookkeeping; real configuration happens later
// (see common_init above).
PMEInstance()
    : dimA_(0), dimB_(0), dimC_(0), splineOrder_(0), requestedNumberOfThreads_(-1), rPower_(0), scaleFactor_(0),
      kappa_(0), boxVecs_(3, 3), recVecs_(3, 3), scaledRecVecs_(3, 3), numNodesA_(1), numNodesB_(1), numNodesC_(1),
      cellA_(0), cellB_(0), cellC_(0), cellAlpha_(0), cellBeta_(0), cellGamma_(0) {}

/*!
 * \brief cellVolume Compute the volume of the unit cell.
 * \return volume in units consistent with those used to define the lattice vectors.
 */
Real cellVolume() {
    // Scalar triple product (3x3 determinant) of the lattice vectors.
    return boxVecs_(0, 0) * boxVecs_(1, 1) * boxVecs_(2, 2) - boxVecs_(0, 0) * boxVecs_(1, 2) * boxVecs_(2, 1) +
           boxVecs_(0, 1) * boxVecs_(1, 2) * boxVecs_(2, 0) - boxVecs_(0, 1) * boxVecs_(1, 0) * boxVecs_(2, 2) +
           boxVecs_(0, 2) * boxVecs_(1, 0) * boxVecs_(2, 1) - boxVecs_(0, 2) * boxVecs_(1, 1) * boxVecs_(2, 0);
}

/*!
 * \brief Sets the unit cell lattice vectors, with units consistent with those used to specify coordinates.
 * \param A the A lattice parameter in units consistent with the coordinates.
 * \param B the B lattice parameter in units consistent with the coordinates.
 * \param C the C lattice parameter in units consistent with the coordinates.
 * \param alpha the alpha lattice parameter in degrees.
 * \param beta the beta lattice parameter in degrees.
 * \param gamma the gamma lattice parameter in degrees.
 * \param latticeType how to arrange the lattice vectors.
Options are
 *        ShapeMatrix: enforce a symmetric representation of the lattice vectors [c.f. S. Nosé and M. L. Klein,
 *        Mol. Phys. 50 1055 (1983)] particularly appendix C.
 *        XAligned: make the A vector coincide with the X axis, the B vector fall in the XY plane, and the C vector
 *        take the appropriate alignment to completely define the system.
 */
void setLatticeVectors(Real A, Real B, Real C, Real alpha, Real beta, Real gamma, LatticeType latticeType) {
    // Only rebuild the lattice-derived quantities if some parameter actually changed.
    if (A != cellA_ || B != cellB_ || C != cellC_ || alpha != cellAlpha_ || beta != cellBeta_ ||
        gamma != cellGamma_ || latticeType != latticeType_) {
        if (latticeType == LatticeType::ShapeMatrix) {
            // Build H^T H from the cell parameters, then obtain the symmetric lattice representation as
            // its matrix square root via diagonalization.
            RealMat HtH(3, 3);
            HtH(0, 0) = A * A;
            HtH(1, 1) = B * B;
            HtH(2, 2) = C * C;
            const float TOL = 1e-4f;
            // Check for angles very close to 90, to avoid noise from the eigensolver later on.
            HtH(0, 1) = HtH(1, 0) = std::abs(gamma - 90) < TOL ? 0 : A * B * cos(M_PI * gamma / 180);
            HtH(0, 2) = HtH(2, 0) = std::abs(beta - 90) < TOL ? 0 : A * C * cos(M_PI * beta / 180);
            HtH(1, 2) = HtH(2, 1) = std::abs(alpha - 90) < TOL ? 0 : B * C * cos(M_PI * alpha / 180);

            auto eigenTuple = HtH.diagonalize();
            RealMat evalsReal = std::get<0>(eigenTuple);
            RealMat evecs = std::get<1>(eigenTuple);
            // sqrt of the eigenvalues gives the singular values of the box matrix.
            for (int i = 0; i < 3; ++i) evalsReal(i, 0) = sqrt(evalsReal(i, 0));
            boxVecs_.setZero();
            for (int i = 0; i < 3; ++i) {
                for (int j = 0; j < 3; ++j) {
                    for (int k = 0; k < 3; ++k) {
                        boxVecs_(i, j) += evecs(i, k) * evecs(j, k) * evalsReal(k, 0);
                    }
                }
            }
            // NOTE(review): this assignment appears redundant; recVecs_ is recomputed just after the if/else.
            recVecs_ = boxVecs_.inverse();
        } else if (latticeType == LatticeType::XAligned) {
            // Standard crystallographic construction: A along x, B in the xy plane, C completing the cell.
            boxVecs_(0, 0) = A;
            boxVecs_(0, 1) = 0;
            boxVecs_(0, 2) = 0;
            boxVecs_(1, 0) = B * cos(M_PI / 180 * gamma);
            boxVecs_(1, 1) = B * sin(M_PI / 180 * gamma);
            boxVecs_(1, 2) = 0;
            boxVecs_(2, 0) = C * cos(M_PI / 180 * beta);
            boxVecs_(2, 1) = (B * C * cos(M_PI / 180 * alpha) - boxVecs_(2, 0) * boxVecs_(1, 0)) / boxVecs_(1, 1);
            boxVecs_(2, 2) = sqrt(C * C - boxVecs_(2, 0) * boxVecs_(2, 0) - boxVecs_(2, 1) * boxVecs_(2, 1));
        } else {
            throw std::runtime_error("Unknown lattice type in setLatticeVectors");
        }
        recVecs_ = boxVecs_.inverse();
        // Reciprocal vectors pre-scaled by the grid dimensions, used to map coordinates onto grid units.
        scaledRecVecs_ = recVecs_.clone();
        scaledRecVecs_.row(0) *= dimA_;
        scaledRecVecs_.row(1) *= dimB_;
        scaledRecVecs_.row(2) *= dimC_;
        cellA_ = A;
        cellB_ = B;
        cellC_ = C;
        cellAlpha_ = alpha;
        cellBeta_ = beta;
        cellGamma_ = gamma;
        latticeType_ = latticeType;
        unitCellHasChanged_ = true;
    } else {
        unitCellHasChanged_ = false;
    }
}

/*!
 * \brief Performs the forward 3D FFT of the discretized parameter grid.
 * \param realGrid the array of discretized parameters (stored in CBA order,
 *        with A being the fast running index) to be transformed.
 * \return Pointer to the transformed grid, which is stored in one of the buffers in BAC order.
*/ Complex *forwardTransform(Real *realGrid) { Real *realCBA; Complex *buffer1, *buffer2; if (realGrid == reinterpret_cast<Real *>(workSpace1_.data())) { realCBA = reinterpret_cast<Real *>(workSpace2_.data()); buffer1 = workSpace2_.data(); buffer2 = workSpace1_.data(); } else { realCBA = reinterpret_cast<Real *>(workSpace2_.data()); buffer1 = workSpace2_.data(); buffer2 = workSpace1_.data(); } #if HAVE_MPI == 1 if (numNodesA_ > 1) { // Communicate A along columns mpiCommunicatorA_->allToAll(realGrid, realCBA, subsetOfCAlongA_ * myDimA_ * myDimB_); // Resort the data to end up with realGrid holding a full row of A data, for B pencil and C subset. for (int c = 0; c < subsetOfCAlongA_; ++c) { Real *outC = realGrid + c * myDimB_ * dimA_; for (int b = 0; b < myDimB_; ++b) { for (int chunk = 0; chunk < numNodesA_; ++chunk) { Real *inPtr = realCBA + (chunk * subsetOfCAlongA_ + c) * myDimB_ * myDimA_ + b * myDimA_; std::copy(inPtr, inPtr + myDimA_, outC + b * dimA_ + chunk * myDimA_); } } } } #endif // Each parallel node allocates buffers of length dimA/(2 numNodesA)+1 for A, leading to a total of // dimA/2 + numNodesA = complexDimA+numNodesA-1 if dimA is even // and // numNodesA (dimA-1)/2 + numNodesA = complexDimA + numNodesA/2-1 if dimA is odd // We just allocate the larger size here, remembering that the final padding values on the last node // will all be allocated to zero and will not contribute to the final answer. 
helpme::vector<Complex> buffer(complexDimA_ + numNodesA_ - 1); // A transform, with instant sort to CAB ordering for each local block auto scratch = buffer.data(); for (int c = 0; c < subsetOfCAlongA_; ++c) { for (int b = 0; b < myDimB_; ++b) { Real *gridPtr = realGrid + c * myDimB_ * dimA_ + b * dimA_; fftHelperA_.transform(gridPtr, scratch); for (int chunk = 0; chunk < numNodesA_; ++chunk) { for (int a = 0; a < myComplexDimA_; ++a) { buffer1[(chunk * subsetOfCAlongA_ + c) * myComplexDimA_ * myDimB_ + a * myDimB_ + b] = scratch[chunk * myComplexDimA_ + a]; } } } } #if HAVE_MPI == 1 // Communicate A back to blocks if (numNodesA_ > 1) { mpiCommunicatorA_->allToAll(buffer1, buffer2, subsetOfCAlongA_ * myComplexDimA_ * myDimB_); std::swap(buffer1, buffer2); } // Communicate B along rows if (numNodesB_ > 1) { mpiCommunicatorB_->allToAll(buffer1, buffer2, subsetOfCAlongB_ * myComplexDimA_ * myDimB_); // Resort the data to end up with the buffer holding a full row of B data, for A pencil and C subset. 
for (int c = 0; c < subsetOfCAlongB_; ++c) { Complex *cPtr = buffer1 + c * myComplexDimA_ * dimB_; for (int a = 0; a < myComplexDimA_; ++a) { for (int chunk = 0; chunk < numNodesB_; ++chunk) { Complex *inPtr = buffer2 + (chunk * subsetOfCAlongB_ + c) * myComplexDimA_ * myDimB_ + a * myDimB_; std::copy(inPtr, inPtr + myDimB_, cPtr + a * dimB_ + chunk * myDimB_); } } } } #endif // B transform for (int c = 0; c < subsetOfCAlongB_; ++c) { Complex *cPtr = buffer1 + c * myComplexDimA_ * dimB_; for (int a = 0; a < myComplexDimA_; ++a) { fftHelperB_.transform(cPtr + a * dimB_, FFTW_FORWARD); } } #if HAVE_MPI == 1 if (numNodesB_ > 1) { for (int c = 0; c < subsetOfCAlongB_; ++c) { Complex *zPtr = buffer1 + c * myComplexDimA_ * dimB_; for (int a = 0; a < myComplexDimA_; ++a) { for (int chunk = 0; chunk < numNodesB_; ++chunk) { Complex *inPtr = zPtr + a * dimB_ + chunk * myDimB_; Complex *outPtr = buffer2 + (chunk * subsetOfCAlongB_ + c) * myComplexDimA_ * myDimB_ + a * myDimB_; std::copy(inPtr, inPtr + myDimB_, outPtr); } } } // Communicate B back to blocks mpiCommunicatorB_->allToAll(buffer2, buffer1, subsetOfCAlongB_ * myComplexDimA_ * myDimB_); } #endif // sort local blocks from CAB to BAC order for (int b = 0; b < myDimB_; ++b) { for (int a = 0; a < myComplexDimA_; ++a) { for (int c = 0; c < myDimC_; ++c) { buffer2[b * myComplexDimA_ * myDimC_ + a * myDimC_ + c] = buffer1[c * myComplexDimA_ * myDimB_ + a * myDimB_ + b]; } } } #if HAVE_MPI == 1 if (numNodesC_ > 1) { // Communicate C along columns mpiCommunicatorC_->allToAll(buffer2, buffer1, subsetOfBAlongC_ * myComplexDimA_ * myDimC_); for (int b = 0; b < subsetOfBAlongC_; ++b) { Complex *outPtrB = buffer2 + b * myComplexDimA_ * dimC_; for (int a = 0; a < myComplexDimA_; ++a) { Complex *outPtrBA = outPtrB + a * dimC_; for (int chunk = 0; chunk < numNodesC_; ++chunk) { Complex *inPtr = buffer1 + (chunk * subsetOfBAlongC_ + b) * myComplexDimA_ * myDimC_ + a * myDimC_; std::copy(inPtr, inPtr + myDimC_, outPtrBA + chunk * 
myDimC_);
                }
            }
        }
    }
#endif

    // C transform
    for (int b = 0; b < subsetOfBAlongC_; ++b) {
        Complex *outPtrB = buffer2 + b * myComplexDimA_ * dimC_;
        for (int a = 0; a < myComplexDimA_; ++a) {
            Complex *outPtrBA = outPtrB + a * dimC_;
            fftHelperC_.transform(outPtrBA, FFTW_FORWARD);
        }
    }
    return buffer2;
}

/*!
 * \brief Performs the inverse 3D FFT.
 * \param convolvedGrid the complex array of discretized parameters convolved with the influence function
 *        (stored in BAC order, with C being the fast running index) to be transformed.
 * \return Pointer to the potential grid, which is stored in one of the buffers in CBA order.
 */
Real *inverseTransform(Complex *convolvedGrid) {
    Complex *buffer1, *buffer2;
    // Setup scratch, taking care not to overwrite the convolved grid.
    if (convolvedGrid == workSpace1_.data()) {
        buffer1 = workSpace2_.data();
        buffer2 = workSpace1_.data();
    } else {
        buffer1 = workSpace1_.data();
        buffer2 = workSpace2_.data();
    }

    // C transform
    for (int y = 0; y < subsetOfBAlongC_; ++y) {
        for (int x = 0; x < myComplexDimA_; ++x) {
            int yx = y * myComplexDimA_ * dimC_ + x * dimC_;
            fftHelperC_.transform(convolvedGrid + yx, FFTW_BACKWARD);
        }
    }

#if HAVE_MPI == 1
    if (numNodesC_ > 1) {
        // Communicate C back to blocks
        for (int b = 0; b < subsetOfBAlongC_; ++b) {
            Complex *inPtrB = convolvedGrid + b * myComplexDimA_ * dimC_;
            for (int a = 0; a < myComplexDimA_; ++a) {
                Complex *inPtrBA = inPtrB + a * dimC_;
                for (int chunk = 0; chunk < numNodesC_; ++chunk) {
                    Complex *inPtrBAC = inPtrBA + chunk * myDimC_;
                    Complex *outPtr =
                        buffer1 + (chunk * subsetOfBAlongC_ + b) * myComplexDimA_ * myDimC_ + a * myDimC_;
                    std::copy(inPtrBAC, inPtrBAC + myDimC_, outPtr);
                }
            }
        }
        mpiCommunicatorC_->allToAll(buffer1, buffer2, subsetOfBAlongC_ * myComplexDimA_ * myDimC_);
    }
#endif

    // sort local blocks from BAC to CAB order
    // (per the setup above, buffer2 aliases convolvedGrid when the grid lives in a workspace, so in the
    // serial path this reads the freshly back-transformed data)
    for (int B = 0; B < myDimB_; ++B) {
        for (int A = 0; A < myComplexDimA_; ++A) {
            for (int C = 0; C < myDimC_; ++C) {
                buffer1[C * myComplexDimA_ * myDimB_ + A * myDimB_ + B] =
                    buffer2[B * myComplexDimA_ * myDimC_ + A * myDimC_ + C];
            }
        }
    }

#if HAVE_MPI == 1
    // Communicate B along rows
    if (numNodesB_ > 1) {
        mpiCommunicatorB_->allToAll(buffer1, buffer2, subsetOfCAlongB_ * myComplexDimA_ * myDimB_);
        // Resort the data to end up with the buffer holding a full row of B data, for A pencil and C subset.
        for (int c = 0; c < subsetOfCAlongB_; ++c) {
            Complex *cPtr = buffer1 + c * myComplexDimA_ * dimB_;
            for (int a = 0; a < myComplexDimA_; ++a) {
                for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                    Complex *inPtr =
                        buffer2 + (chunk * subsetOfCAlongB_ + c) * myComplexDimA_ * myDimB_ + a * myDimB_;
                    std::copy(inPtr, inPtr + myDimB_, cPtr + a * dimB_ + chunk * myDimB_);
                }
            }
        }
    }
#endif

    // B transform with instant sort of local blocks from CAB -> CBA order
    for (int c = 0; c < subsetOfCAlongB_; ++c) {
        for (int a = 0; a < myComplexDimA_; ++a) {
            int cx = c * myComplexDimA_ * dimB_ + a * dimB_;
            fftHelperB_.transform(buffer1 + cx, FFTW_BACKWARD);
            for (int b = 0; b < myDimB_; ++b) {
                for (int chunk = 0; chunk < numNodesB_; ++chunk) {
                    int cb = (chunk * subsetOfCAlongB_ + c) * myDimB_ * myComplexDimA_ + b * myComplexDimA_;
                    buffer2[cb + a] = buffer1[cx + chunk * myDimB_ + b];
                }
            }
        }
    }

#if HAVE_MPI == 1
    // Communicate B back to blocks
    if (numNodesB_ > 1) {
        mpiCommunicatorB_->allToAll(buffer2, buffer1, subsetOfCAlongB_ * myComplexDimA_ * myDimB_);
    } else {
        std::swap(buffer1, buffer2);
    }

    // Communicate A along rows
    if (numNodesA_ > 1) {
        mpiCommunicatorA_->allToAll(buffer1, buffer2, subsetOfCAlongA_ * myComplexDimA_ * myDimB_);
        // Resort the data to end up with the buffer holding a full row of A data, for B pencil and C subset.
        for (int c = 0; c < subsetOfCAlongA_; ++c) {
            Complex *cPtr = buffer1 + c * myDimB_ * complexDimA_;
            for (int b = 0; b < myDimB_; ++b) {
                for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                    Complex *inPtr =
                        buffer2 + (chunk * subsetOfCAlongA_ + c) * myComplexDimA_ * myDimB_ + b * myComplexDimA_;
                    std::copy(inPtr, inPtr + myComplexDimA_, cPtr + b * complexDimA_ + chunk * myComplexDimA_);
                }
            }
        }
    }
#else
    std::swap(buffer1, buffer2);
#endif

    // A transform (complex-to-real), writing the potential grid in place over buffer2.
    Real *realGrid = reinterpret_cast<Real *>(buffer2);
    for (int cb = 0; cb < subsetOfCAlongA_ * myDimB_; ++cb) {
        fftHelperA_.transform(buffer1 + cb * complexDimA_, realGrid + cb * dimA_);
    }

#if HAVE_MPI == 1
    // Communicate A back to blocks
    if (numNodesA_ > 1) {
        Real *realGrid2 = reinterpret_cast<Real *>(buffer1);
        for (int c = 0; c < subsetOfCAlongA_; ++c) {
            Real *cPtr = realGrid + c * myDimB_ * dimA_;
            for (int b = 0; b < myDimB_; ++b) {
                for (int chunk = 0; chunk < numNodesA_; ++chunk) {
                    Real *outPtr = realGrid2 + (chunk * subsetOfCAlongA_ + c) * myDimB_ * myDimA_ + b * myDimA_;
                    Real *inPtr = cPtr + b * dimA_ + chunk * myDimA_;
                    std::copy(inPtr, inPtr + myDimA_, outPtr);
                }
            }
        }
        mpiCommunicatorA_->allToAll(realGrid2, realGrid, subsetOfCAlongA_ * myDimB_ * myDimA_);
    }
#endif

    return realGrid;
}

/*!
 * \brief convolveE A wrapper to determine the correct convolution function to call.
 * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering.
 * \return the reciprocal space energy.
 */
Real convolveE(Complex *transformedGrid) {
    // Make sure the cached influence function matches the current cell/parameters.
    updateInfluenceFunction();
    size_t myNy = myDimB_ / numNodesC_;
    size_t myNx = myComplexDimA_;
    size_t nz = dimC_;
    size_t nxz = myNx * nz;
    size_t nyxz = myNy * nxz;
    size_t halfNx = dimA_ / 2 + 1;
    bool iAmNodeZero = (rankA_ == 0 && rankB_ == 0 && rankC_ == 0);
    Real *influenceFunction = cachedInfluenceFunction_.data();
    int startX = rankA_ * myComplexDimA_;

    Real energy = 0;
    if (rPower_ > 3 && iAmNodeZero) {
        // Kernels with rPower>3 are absolutely convergent and should have the m=0 term present.
        // To compute it we need sum_ij c(i)c(j), which can be obtained from the structure factor norm.
        Real prefac = 2 * scaleFactor_ * M_PI * sqrtPi * pow(kappa_, rPower_ - 3) /
                      ((rPower_ - 3) * nonTemplateGammaComputer<Real>(rPower_) * cellVolume());
        energy += prefac * std::norm(transformedGrid[0]);
    }
    // NOTE(review): the first grid element is zeroed unconditionally here, whereas convolveEVImpl guards the
    // equivalent zeroing with a node-zero check; confirm this is correct on nodes that do not own m=0.
    transformedGrid[0] = Complex(0, 0);
#pragma omp parallel for reduction(+ : energy) num_threads(nThreads_)
    for (size_t yxz = 0; yxz < nyxz; ++yxz) {
        size_t xz = yxz % nxz;
        int kx = startX + xz / nz;
        // We only loop over the first nx/2+1 x values; this
        // accounts for the "missing" complex conjugate values.
        Real permPrefac = kx != 0 && kx != halfNx - 1 ? 2 : 1;
        Real structFactorNorm = std::norm(transformedGrid[yxz]);
        energy += permPrefac * structFactorNorm * influenceFunction[yxz];
        // Convolve in place so the backtransform of transformedGrid yields the potential.
        transformedGrid[yxz] *= influenceFunction[yxz];
    }
    // Overall factor of 1/2 in the energy expression.
    return energy / 2;
}

/*!
 * \brief convolveEV A wrapper to determine the correct convolution function to call, including virial.
 * \param transformedGrid the pointer to the complex array holding the transformed grid in YXZ ordering.
 * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
 *        This vector is incremented, not assigned.
 * \return the reciprocal space energy.
 */
Real convolveEV(Complex *transformedGrid, RealMat &virial) {
    // Dispatch to the rPower-specific implementation selected in common_init via the function pointer.
    return convolveEVFxn_(dimA_, dimB_, dimC_, myComplexDimA_, myDimB_ / numNodesC_, rankA_ * myComplexDimA_,
                          rankB_ * myDimB_ + rankC_ * myDimB_ / numNodesC_, scaleFactor_, transformedGrid, recVecs_,
                          cellVolume(), kappa_, &splineModA_[0], &splineModB_[0], &splineModC_[0], virial,
                          nThreads_);
}

/*!
 * \brief Spread the parameters onto the charge grid. Generally this shouldn't be called;
 *        use the various computeE() methods instead. This the more efficient version that filters
 *        the atom list and uses pre-computed splines. Therefore, the splineCache_
 *        member must have been updated via a call to filterAtomsAndBuildSplineCache() first.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.).
 * \param parameters the list of parameters associated with each atom (charges, C6
 *        coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension
 *        nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
 *
 *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
 *
 *        i.e. generated by the python loops
 *        \code{.py}
 *        for L in range(maxAM+1):
 *            for Lz in range(0,L+1):
 *                for Ly in range(0, L - Lz + 1):
 *                    Lx = L - Ly - Lz
 *        \endcode
 * \return realGrid the array of discretized parameters (stored in CBA order).
 */
Real *spreadParameters(int parameterAngMom, const RealMat &parameters) {
    Real *realGrid = reinterpret_cast<Real *>(workSpace1_.data());
    // Zero the accumulation grid before spreading.
    std::fill(workSpace1_.begin(), workSpace1_.end(), 0);
    updateAngMomIterator(parameterAngMom);
    size_t nAtoms = atomList_.size();
    int nComponents = nCartesian(parameterAngMom);
    for (size_t relativeAtomNumber = 0; relativeAtomNumber < nAtoms; ++relativeAtomNumber) {
        // Use the pre-computed splines; atomList_/splineCache_ must be current
        // (see filterAtomsAndBuildSplineCache).
        const auto &entry = splineCache_[relativeAtomNumber];
        const int &atom = entry.absoluteAtomNumber;
        const auto &splineA = entry.aSpline;
        const auto &splineB = entry.bSpline;
        const auto &splineC = entry.cSpline;
        spreadParametersImpl(atom, realGrid, nComponents, splineA, splineB, splineC, parameters);
    }
    return realGrid;
}

/*!
 * \brief Spread the parameters onto the charge grid. Generally this shouldn't be called;
 *        use the various computeE() methods instead. This is the slower version of this call that recomputes
 *        splines on demand and makes no assumptions about the integrity of the spline cache.
 * \param parameterAngMom the angular momentum of the parameters
 *        (0 for charges, C6 coefficients, 2 for quadrupoles, etc.).
 * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles,
 *        etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL =
 *        (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
 *
 *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
 *
 *        i.e. generated by the python loops
 *        \code{.py}
 *        for L in range(maxAM+1):
 *            for Lz in range(0,L+1):
 *                for Ly in range(0, L - Lz + 1):
 *                    Lx = L - Ly - Lz
 *        \endcode
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \return realGrid the array of discretized parameters (stored in CBA order).
 */
Real *spreadParameters(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) {
    Real *realGrid = reinterpret_cast<Real *>(workSpace1_.data());
    // Zero the accumulation grid before spreading.
    std::fill(workSpace1_.begin(), workSpace1_.end(), 0);
    updateAngMomIterator(parameterAngMom);
    int nComponents = nCartesian(parameterAngMom);
    size_t nAtoms = coordinates.nRows();
    for (size_t atom = 0; atom < nAtoms; ++atom) {
        // Blindly reconstruct splines for this atom, assuming nothing about the validity of the cache.
        // Note that this incurs a somewhat steep cost due to repeated memory allocations.
        auto bSplines = makeBSplines(coordinates[atom], parameterAngMom);
        const auto &splineA = std::get<0>(bSplines);
        const auto &splineB = std::get<1>(bSplines);
        const auto &splineC = std::get<2>(bSplines);
        spreadParametersImpl(atom, realGrid, nComponents, splineA, splineB, splineC, parameters);
    }
    return realGrid;
}

/*!
 * \brief Probes the potential grid to get the forces. Generally this shouldn't be called;
 *        use the various computeE() methods instead. This is the slower version of this call that recomputes
 *        splines on demand and makes no assumptions about the integrity of the spline cache.
 * \param potentialGrid pointer to the array containing the potential, in ZYX order.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.).
 * \param parameters the list of parameters associated with each atom (charges, C6
 *        coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension
 *        nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
 *
 *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
 *
 *        i.e. generated by the python loops
 *        \code{.py}
 *        for L in range(maxAM+1):
 *            for Lz in range(0,L+1):
 *                for Ly in range(0, L - Lz + 1):
 *                    Lx = L - Ly - Lz
 *        \endcode
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
 */
void probeGrid(const Real *potentialGrid, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates,
               RealMat &forces) {
    // One extra level of angular momentum is needed to form the force (gradient) components.
    updateAngMomIterator(parameterAngMom + 1);
    int nComponents = nCartesian(parameterAngMom);
    int nForceComponents = nCartesian(parameterAngMom + 1);
    RealMat fractionalPhis(1, nForceComponents);
    size_t nAtoms = parameters.nRows();
    for (size_t atom = 0; atom < nAtoms; ++atom) {
        // Splines are rebuilt from scratch here; no assumptions are made about the spline cache.
        auto bSplines = makeBSplines(coordinates[atom], parameterAngMom + 1);
        auto splineA = std::get<0>(bSplines);
        auto splineB = std::get<1>(bSplines);
        auto splineC = std::get<2>(bSplines);
        probeGridImpl(atom, potentialGrid, nComponents, nForceComponents, splineA, splineB, splineC,
                      fractionalPhis[0], parameters, forces[atom]);
    }
}

/*!
 * \brief Probes the potential grid to get the forces. Generally this shouldn't be called;
 *        use the various computeE() methods instead. This is the faster version that uses
 *        the filtered atom list and uses pre-computed splines. Therefore, the splineCache_
 *        member must have been updated via a call to filterAtomsAndBuildSplineCache() first.
 *
 * \param potentialGrid pointer to the array containing the potential, in ZYX order.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.).
 * \param parameters the list of parameters associated with each atom (charges, C6
 *        coefficients, multipoles, etc...).
For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. */ void probeGrid(const Real *potentialGrid, int parameterAngMom, const RealMat &parameters, RealMat &forces) { updateAngMomIterator(parameterAngMom + 1); int nComponents = nCartesian(parameterAngMom); int nForceComponents = nCartesian(parameterAngMom + 1); const Real *paramPtr = parameters[0]; // Find how many multiples of the cache line size are needed // to ensure that each thread hits a unique page. size_t rowSize = std::ceil(nForceComponents / cacheLineSizeInReals_) * cacheLineSizeInReals_; RealMat fractionalPhis(nThreads_, rowSize); size_t nAtoms = atomList_.size(); #pragma omp parallel for num_threads(nThreads_) for (size_t relativeAtomNumber = 0; relativeAtomNumber < nAtoms; ++relativeAtomNumber) { const auto &entry = splineCache_[relativeAtomNumber]; const int &atom = entry.absoluteAtomNumber; const auto &splineA = entry.aSpline; const auto &splineB = entry.bSpline; const auto &splineC = entry.cSpline; if (parameterAngMom) { #ifdef _OPENMP int threadID = omp_get_thread_num(); #else int threadID = 1; #endif Real *myScratch = fractionalPhis[threadID % nThreads_]; probeGridImpl(atom, potentialGrid, nComponents, nForceComponents, splineA, splineB, splineC, myScratch, parameters, forces[atom]); } else { probeGridImpl(potentialGrid, splineA, splineB, splineC, paramPtr[atom], forces[atom]); } } } /*! * \brief computeESlf computes the Ewald self interaction energy. 
* \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \return the self energy. */ Real computeESlf(int parameterAngMom, const RealMat &parameters) { assertInitialized(); return slfEFxn_(parameterAngMom, parameters, kappa_, scaleFactor_); } /*! * \brief computeEDir computes the direct space energy. This is provided mostly for debugging and testing * purposes; generally the host program should provide the pairwise interactions. \param pairList dense list of * atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. \param parameterAngMom the angular momentum of * the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param parameters the list of * parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). For a parameter with * angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the * fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \return the direct space energy. 
*/ Real computeEDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); energy += parameters(i, 0) * parameters(j, 0) * dirEFxn_(rSquared, kappaSquared); } return scaleFactor_ * energy; } /*! * \brief computeEFDir computes the direct space energy and force. This is provided mostly for debugging and * testing purposes; generally the host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the direct space energy. 
*/ Real computeEFDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = dirEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; auto force = deltaR.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; } return energy; } /*! * \brief computeEFVDir computes the direct space energy, force and virial. This is provided mostly for * debugging and testing purposes; generally the host program should provide the pairwise interactions. \param * pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. \param parameterAngMom * the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param * parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). * For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = * (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. 
generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the direct space energy. */ Real computeEFVDir(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = dirEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; RealMat dRCopy = deltaR.clone(); auto force = dRCopy.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; virial[0][0] += force[0] * deltaR[0][0]; virial[0][1] += 0.5f * (force[0] * deltaR[0][1] + force[1] * deltaR[0][0]); virial[0][2] += force[1] * deltaR[0][1]; virial[0][3] += 0.5f * (force[0] * deltaR[0][2] + force[2] * deltaR[0][0]); virial[0][4] += 0.5f * (force[1] * deltaR[0][2] + force[2] * deltaR[0][1]); virial[0][5] += force[2] * deltaR[0][2]; } return energy; } /*! 
* \brief computeEAdj computes the adjusted real space energy which extracts the energy for excluded pairs that * is present in reciprocal space. This is provided mostly for debugging and testing purposes; generally the * host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \return the adjusted energy. */ Real computeEAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); energy += parameters(i, 0) * parameters(j, 0) * adjEFxn_(rSquared, kappaSquared); } return scaleFactor_ * energy; } /*! * \brief computeEFAdj computes the adjusted energy and force. 
This is provided mostly for debugging and * testing purposes; generally the host program should provide the pairwise interactions. \param pairList dense * list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. \param parameterAngMom the angular * momentum of the parameters (0 for charges, C6 coefficients, 2 for quadrupoles, etc.). \param parameters the * list of parameters associated with each atom (charges, C6 coefficients, multipoles, etc...). For a parameter * with angular momentum L, a matrix of dimension nAtoms x nL is expected, where nL = (L+1)*(L+2)*(L+3)/6 and * the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the adjusted energy. */ Real computeEFAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. 
Real rSquared = deltaR.dot(deltaR); auto kernels = adjEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; auto force = deltaR.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; } return energy; } /*! * \brief computeEFVAdj computes the adjusted energy, forces and virial. This is provided mostly for debugging * and testing purposes; generally the host program should provide the pairwise interactions. * \param pairList dense list of atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the adjusted energy. 
*/ Real computeEFVAdj(const Matrix<short> &pairList, int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { if (parameterAngMom) throw std::runtime_error("Multipole self terms have not been coded yet."); sanityChecks(parameterAngMom, parameters, coordinates); Real energy = 0; Real kappaSquared = kappa_ * kappa_; size_t nPair = pairList.nRows(); for (int pair = 0; pair < nPair; ++pair) { short i = pairList(pair, 0); short j = pairList(pair, 1); auto deltaR = coordinates.row(j) - coordinates.row(i); // TODO: apply minimum image convention. Real rSquared = deltaR.dot(deltaR); auto kernels = adjEFFxn_(rSquared, kappa_, kappaSquared); Real eKernel = std::get<0>(kernels); Real fKernel = std::get<1>(kernels); Real prefactor = scaleFactor_ * parameters(i, 0) * parameters(j, 0); energy += prefactor * eKernel; Real f = -prefactor * fKernel; RealMat dRCopy = deltaR.clone(); auto force = dRCopy.row(0); force *= f; forces.row(i) -= force; forces.row(j) += force; virial[0][0] += force[0] * deltaR[0][0]; virial[0][1] += 0.5f * (force[0] * deltaR[0][1] + force[1] * deltaR[0][0]); virial[0][2] += force[1] * deltaR[0][1]; virial[0][3] += 0.5f * (force[0] * deltaR[0][2] + force[2] * deltaR[0][0]); virial[0][4] += 0.5f * (force[1] * deltaR[0][2] + force[2] * deltaR[0][1]); virial[0][5] += force[2] * deltaR[0][2]; } return energy; } /*! * \brief Runs a PME reciprocal space calculation, computing the potential and, optionally, its derivatives. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. 
generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param gridPoints the list of grid points at which the potential is needed; can be the same as the * coordinates. \param derivativeLevel the order of the potential derivatives required; 0 is the potential, 1 is * (minus) the field, etc. \param potential the array holding the potential. This is a matrix of dimensions * nAtoms x nD, where nD is the derivative level requested. See the details fo the parameters argument for * information about ordering of derivative components. N.B. this array is incremented with the potential, not * assigned, so take care to zero it first if only the current results are desired. */ void computePRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, const RealMat &gridPoints, int derivativeLevel, RealMat &potential) { sanityChecks(parameterAngMom, parameters, coordinates); updateAngMomIterator(std::max(parameterAngMom, derivativeLevel)); // Note: we're calling the version of spread parameters that computes its own splines here. // This is quite inefficient, but allow the potential to be computed at arbitrary locations by // simply regenerating splines on demand in the probing stage. If this becomes too slow, it's // easy to write some logic to check whether gridPoints and coordinates are the same, and // handle that special case using spline cacheing machinery for efficiency. 
auto realGrid = spreadParameters(parameterAngMom, parameters, coordinates); auto gridAddress = forwardTransform(realGrid); convolveE(gridAddress); const auto potentialGrid = inverseTransform(gridAddress); auto fracPotential = potential.clone(); int nPotentialComponents = nCartesian(derivativeLevel); size_t nPoints = gridPoints.nRows(); for (size_t point = 0; point < nPoints; ++point) { auto bSplines = makeBSplines(gridPoints[point], derivativeLevel); auto splineA = std::get<0>(bSplines); auto splineB = std::get<1>(bSplines); auto splineC = std::get<2>(bSplines); probeGridImpl(potentialGrid, nPotentialComponents, splineA, splineB, splineC, fracPotential[point]); } potential += cartesianTransform(derivativeLevel, scaledRecVecs_, fracPotential); } /*! * \brief Runs a PME reciprocal space calculation, computing energies. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \return the reciprocal space energy. 
*/ Real computeERec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates) { sanityChecks(parameterAngMom, parameters, coordinates); filterAtomsAndBuildSplineCache(parameterAngMom, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); auto gridAddress = forwardTransform(realGrid); return convolveE(gridAddress); } /*! * \brief Runs a PME reciprocal space calculation, computing energies and forces. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the reciprocal space energy. */ Real computeEFRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces) { sanityChecks(parameterAngMom, parameters, coordinates); // Spline derivative level bumped by 1, for energy gradients. 
filterAtomsAndBuildSplineCache(parameterAngMom + 1, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); auto gridAddress = forwardTransform(realGrid); Real energy = convolveE(gridAddress); const auto potentialGrid = inverseTransform(gridAddress); probeGrid(potentialGrid, parameterAngMom, parameters, forces); return energy; } /*! * \brief Runs a PME reciprocal space calculation, computing energies, forces and the virial. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ. * This vector is incremented, not assigned. * \return the reciprocal space energy. */ Real computeEFVRec(int parameterAngMom, const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) { sanityChecks(parameterAngMom, parameters, coordinates); // Spline derivative level bumped by 1, for energy gradients. 
filterAtomsAndBuildSplineCache(parameterAngMom + 1, coordinates); auto realGrid = spreadParameters(parameterAngMom, parameters); auto gridPtr = forwardTransform(realGrid); Real energy = convolveEV(gridPtr, virial); const auto potentialGrid = inverseTransform(gridPtr); probeGrid(potentialGrid, parameterAngMom, parameters, forces); return energy; } /*! * \brief Runs a full (direct and reciprocal space) PME calculation, computing the energy. The direct space * implementation here is not totally optimal, so this routine should primarily be used for testing and * debugging. * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN,jN. * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN. * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for * quadrupoles, etc.). \param parameters the list of parameters associated with each atom (charges, C6 * coefficients, multipoles, etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL * is expected, where nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering * * 0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ... * * i.e. generated by the python loops * \code{.py} * for L in range(maxAM+1): * for Lz in range(0,L+1): * for Ly in range(0, L - Lz + 1): * Lx = L - Ly - Lz * \endcode * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}. * \param energy pointer to the variable holding the energy; this is incremented, not assigned. * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}. * This matrix is incremented, not assigned. * \return the full PME energy. 
 */
Real computeEAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom,
                 const RealMat &parameters, const RealMat &coordinates) {
    sanityChecks(parameterAngMom, parameters, coordinates);
    // The full PME energy is the sum of four contributions: reciprocal space, self
    // energy, direct space over included pairs, and the adjustment for excluded pairs.
    Real energy = computeERec(parameterAngMom, parameters, coordinates);
    energy += computeESlf(parameterAngMom, parameters);
    energy += computeEDir(includedList, parameterAngMom, parameters, coordinates);
    energy += computeEAdj(excludedList, parameterAngMom, parameters, coordinates);
    return energy;
}

/*!
 * \brief Runs a full (direct and reciprocal space) PME calculation, computing energies and forces.  The direct
 *        space implementation here is not totally optimal, so this routine should primarily be used for testing
 *        and debugging.
 * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.).
 * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles,
 *        etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where
 *        nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
 *
 *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
 *
 *        i.e. generated by the python loops
 *        \code{.py}
 *        for L in range(maxAM+1):
 *            for Lz in range(0,L+1):
 *                for Ly in range(0, L - Lz + 1):
 *                    Lx = L - Ly - Lz
 *        \endcode
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
 *        This matrix is incremented, not assigned.
 * \return the full PME energy.
 */
Real computeEFAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom,
                  const RealMat &parameters, const RealMat &coordinates, RealMat &forces) {
    sanityChecks(parameterAngMom, parameters, coordinates);
    // Same four contributions as computeEAll; the EF variants also accumulate forces.
    Real energy = computeEFRec(parameterAngMom, parameters, coordinates, forces);
    energy += computeESlf(parameterAngMom, parameters);
    energy += computeEFDir(includedList, parameterAngMom, parameters, coordinates, forces);
    energy += computeEFAdj(excludedList, parameterAngMom, parameters, coordinates, forces);
    return energy;
}

/*!
 * \brief Runs a full (direct and reciprocal space) PME calculation, computing energies, forces and virials.
 *        The direct space implementation here is not totally optimal, so this routine should primarily
 *        be used for testing and debugging.
 * \param includedList dense list of included atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param excludedList dense list of excluded atom pairs, ordered like i1, j1, i2, j2, i3, j3, ... iN, jN.
 * \param parameterAngMom the angular momentum of the parameters (0 for charges, C6 coefficients, 2 for
 *        quadrupoles, etc.).
 * \param parameters the list of parameters associated with each atom (charges, C6 coefficients, multipoles,
 *        etc...). For a parameter with angular momentum L, a matrix of dimension nAtoms x nL is expected, where
 *        nL = (L+1)*(L+2)*(L+3)/6 and the fast running index nL has the ordering
 *
 *        0 X Y Z XX XY YY XZ YZ ZZ XXX XXY XYY YYY XXZ XYZ YYZ XZZ YZZ ZZZ ...
 *
 *        i.e. generated by the python loops
 *        \code{.py}
 *        for L in range(maxAM+1):
 *            for Lz in range(0,L+1):
 *                for Ly in range(0, L - Lz + 1):
 *                    Lx = L - Ly - Lz
 *        \endcode
 * \param coordinates the cartesian coordinates, ordered in memory as {x1,y1,z1,x2,y2,z2,....xN,yN,zN}.
 * \param forces a Nx3 matrix of the forces, ordered in memory as {Fx1,Fy1,Fz1,Fx2,Fy2,Fz2,....FxN,FyN,FzN}.
 *        This matrix is incremented, not assigned.
 * \param virial a vector of length 6 containing the unique virial elements, in the order XX XY YY XZ YZ ZZ.
 *        This vector is incremented, not assigned.
 * \return the full PME energy.
 */
Real computeEFVAll(const Matrix<short> &includedList, const Matrix<short> &excludedList, int parameterAngMom,
                   const RealMat &parameters, const RealMat &coordinates, RealMat &forces, RealMat &virial) {
    sanityChecks(parameterAngMom, parameters, coordinates);
    // EFV variants additionally accumulate the virial alongside the energy and forces.
    Real energy = computeEFVRec(parameterAngMom, parameters, coordinates, forces, virial);
    energy += computeESlf(parameterAngMom, parameters);
    energy += computeEFVDir(includedList, parameterAngMom, parameters, coordinates, forces, virial);
    energy += computeEFVAdj(excludedList, parameterAngMom, parameters, coordinates, forces, virial);
    return energy;
}

/*!
 * \brief setup initializes this object for a PME calculation using only threading.
 *        This may be called repeatedly without compromising performance.
 * \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive
 *        dispersion).
 * \param kappa the attenuation parameter in units inverse of those used to specify coordinates.
 * \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed).
 * \param dimA the dimension of the FFT grid along the A axis.
 * \param dimB the dimension of the FFT grid along the B axis.
 * \param dimC the dimension of the FFT grid along the C axis.
 * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the
 *        1 / [4 pi epsilon0] for Coulomb calculations).
 * \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads
 *        are used.
*/ void setup(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor, int nThreads) { numNodesHasChanged_ = numNodesA_ != 1 || numNodesB_ != 1 || numNodesC_ != 1; numNodesA_ = numNodesB_ = numNodesC_ = 1; rankA_ = rankB_ = rankC_ = 0; firstA_ = firstB_ = firstC_ = 0; dimA = findGridSize(dimA, {1}); dimB = findGridSize(dimB, {1}); dimC = findGridSize(dimC, {1}); lastA_ = dimA; lastB_ = dimB; lastC_ = dimC; myDimA_ = dimA; myDimB_ = dimB; myDimC_ = dimC; common_init(rPower, kappa, splineOrder, dimA, dimB, dimC, scaleFactor, nThreads); } /*! * \brief setup initializes this object for a PME calculation using MPI parallism and threading. * This may be called repeatedly without compromising performance. * \param rPower the exponent of the (inverse) distance kernel (e.g. 1 for Coulomb, 6 for attractive * dispersion). \param kappa the attenuation parameter in units inverse of those used to specify coordinates. * \param splineOrder the order of B-spline; must be at least (2 + max. multipole order + deriv. level needed). * \param dimA the dimension of the FFT grid along the A axis. * \param dimB the dimension of the FFT grid along the B axis. * \param dimC the dimension of the FFT grid along the C axis. * \param scaleFactor a scale factor to be applied to all computed energies and derivatives thereof (e.g. the * 1 / [4 pi epslion0] for Coulomb calculations). * \param nThreads the maximum number of threads to use for each MPI instance; if set to 0 all available threads * are \param communicator the MPI communicator for the reciprocal space calcultion, which should already be * initialized. * \param numNodesA the number of nodes to be used for the A dimension. * \param numNodesB the number of nodes to be used for the B dimension. * \param numNodesC the number of nodes to be used for the C dimension. 
*/ void setupParallel(int rPower, Real kappa, int splineOrder, int dimA, int dimB, int dimC, Real scaleFactor, int nThreads, const MPI_Comm &communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC) { numNodesHasChanged_ = numNodesA_ != numNodesA || numNodesB_ != numNodesB || numNodesC_ != numNodesC; #if HAVE_MPI == 1 mpiCommunicator_ = std::unique_ptr<MPIWrapper<Real>>(new MPIWrapper<Real>(communicator, numNodesA, numNodesB, numNodesC)); switch (nodeOrder) { case (NodeOrder::ZYX): rankA_ = mpiCommunicator_->myRank_ % numNodesA; rankB_ = (mpiCommunicator_->myRank_ % (numNodesB * numNodesA)) / numNodesA; rankC_ = mpiCommunicator_->myRank_ / (numNodesB * numNodesA); mpiCommunicatorA_ = mpiCommunicator_->split(rankC_ * numNodesB + rankB_, rankA_); mpiCommunicatorB_ = mpiCommunicator_->split(rankC_ * numNodesA + rankA_, rankB_); mpiCommunicatorC_ = mpiCommunicator_->split(rankB_ * numNodesA + rankA_, rankC_); break; default: throw std::runtime_error("Unknown NodeOrder in setupParallel."); } numNodesA_ = numNodesA; numNodesB_ = numNodesB; numNodesC_ = numNodesC; dimA = findGridSize(dimA, {numNodesA}); dimB = findGridSize(dimB, {numNodesB * numNodesC}); dimC = findGridSize(dimC, {numNodesA * numNodesC, numNodesB * numNodesC}); myDimA_ = dimA / numNodesA; myDimB_ = dimB / numNodesB; myDimC_ = dimC / numNodesC; firstA_ = rankA_ * myDimA_; firstB_ = rankB_ * myDimB_; firstC_ = rankC_ * myDimC_; lastA_ = rankA_ == numNodesA ? dimA : (rankA_ + 1) * myDimA_; lastB_ = rankB_ == numNodesB ? dimB : (rankB_ + 1) * myDimB_; lastC_ = rankC_ == numNodesC ? dimC : (rankC_ + 1) * myDimC_; common_init(rPower, kappa, splineOrder, dimA, dimB, dimC, scaleFactor, nThreads); #else // Have MPI throw std::runtime_error( "setupParallel called, but helpme was not compiled with MPI. 
Make sure you compile with -DHAVE_MPI=1 " "in " "the list of compiler definitions."); #endif // Have MPI } }; } // Namespace helpme using PMEInstanceD = helpme::PMEInstance<double>; using PMEInstanceF = helpme::PMEInstance<float>; #else // C header #include <stddef.h> #if HAVE_MPI == 1 #include <mpi.h> #endif typedef enum { XAligned = 0, ShapeMatrix = 1 } LatticeType; typedef enum { ZYX = 0 } NodeOrder; typedef struct PMEInstance PMEInstance; extern struct PMEInstance *helpme_createD(); extern struct PMEInstance *helpme_createF(); extern void helpme_destroyD(struct PMEInstance *pme); extern void helpme_destroyF(struct PMEInstance *pme); extern void helpme_setupD(struct PMEInstance *pme, int rPower, double kappa, int splineOrder, int aDim, int bDim, int cDim, double scaleFactor, int nThreads); extern void helpme_setupF(struct PMEInstance *pme, int rPower, float kappa, int splineOrder, int aDim, int bDim, int cDim, float scaleFactor, int nThreads); #if HAVE_MPI == 1 extern void helpme_setup_parallelD(PMEInstance *pme, int rPower, double kappa, int splineOrder, int dimA, int dimB, int dimC, double scaleFactor, int nThreads, MPI_Comm communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC); extern void helpme_setup_parallelF(PMEInstance *pme, int rPower, float kappa, int splineOrder, int dimA, int dimB, int dimC, float scaleFactor, int nThreads, MPI_Comm communicator, NodeOrder nodeOrder, int numNodesA, int numNodesB, int numNodesC); #endif // HAVE_MPI extern void helpme_set_lattice_vectorsD(struct PMEInstance *pme, double A, double B, double C, double kappa, double beta, double gamma, LatticeType latticeType); extern void helpme_set_lattice_vectorsF(struct PMEInstance *pme, float A, float B, float C, float kappa, float beta, float gamma, LatticeType latticeType); extern double helpme_compute_E_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters, double *coordinates); extern float helpme_compute_E_recF(struct 
PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters, float *coordinates); extern double helpme_compute_EF_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters, double *coordinates, double *forces); extern float helpme_compute_EF_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters, float *coordinates, float *forces); extern double helpme_compute_EFV_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters, double *coordinates, double *forces, double *virial); extern float helpme_compute_EFV_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters, float *coordinates, float *forces, float *virial); extern void helpme_compute_P_recD(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, double *parameters, double *coordinates, size_t nGridPoints, double *gridPoints, int derivativeLevel, double *potential); extern void helpme_compute_P_recF(struct PMEInstance *pme, size_t nAtoms, int parameterAngMom, float *parameters, float *coordinates, size_t nGridPoints, float *gridPoints, int derivativeLevel, float *potential); #endif // C++/C #endif // Header guard
deconvolution_packnto1.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution (transposed convolution) kernel for RISC-V Vector: the input is
// packed with `packn` floats per element ("packn" layout) and the output is
// plain unpacked floats (pack1).  For every output pixel the kernel gathers all
// input positions that would have contributed to it in the forward convolution,
// accumulates a packn-wide vector dot product, and reduces it to a scalar.
static void deconvolution_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packnto1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // Number of 32-bit floats per vector register (vlenb is the register size in bytes).
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel footprint once dilation is applied.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Scalar part of the accumulator starts at the bias (if any);
                // the vector part starts at zero and is folded in at the end.
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl);

                // Weights for output channel p: maxk * channels blocks of packn floats.
                const float* kptr = (const float*)weight_data_packnto1 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map the output row back to the input row that would have
                        // produced it through this kernel tap; skip taps that fall
                        // between strides or outside the input.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // packn input floats at (sy, sx) times packn weights.
                            const float* sptr = m.row(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            vfloat32m1_t _val = vle32_v_f32m1(sptr, vl);
                            vfloat32m1_t _w = vle32_v_f32m1(kptr + k * packn, vl);
                            _sum = vfmacc_vv_f32m1(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                // Horizontal reduction of the vector accumulator, seeded with the
                // scalar (bias) part, yielding the final scalar sum.
                sum = vfmv_f_s_f32m1_f32(vfredusum_vs_f32m1_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
SparseDenseProduct.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_SPARSEDENSEPRODUCT_H #define EIGEN_SPARSEDENSEPRODUCT_H namespace Eigen { namespace internal { template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; }; template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; }; template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType, int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor, bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1> struct sparse_time_dense_product_impl; template<typename SparseLhsType, typename DenseRhsType, typename DenseResType> struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true> { typedef typename internal::remove_all<SparseLhsType>::type Lhs; typedef typename internal::remove_all<DenseRhsType>::type Rhs; typedef typename internal::remove_all<DenseResType>::type Res; typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator; typedef typename evaluator<Lhs>::type LhsEval; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { LhsEval lhsEval(lhs); Index n = lhs.outerSize(); #ifdef EIGEN_HAS_OPENMP Eigen::initParallel(); Index threads = Eigen::nbThreads(); #endif for(Index c=0; c<rhs.cols(); ++c) { #ifdef EIGEN_HAS_OPENMP // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems. 
// It basically represents the minimal amount of work to be done to be worth it. if(threads>1 && lhs.nonZeros() > 20000) { #pragma omp parallel for schedule(static) num_threads(threads) for(Index i=0; i<n; ++i) processRow(lhsEval,rhs,res,alpha,i,c); } else #endif { for(Index i=0; i<n; ++i) processRow(lhsEval,rhs,res,alpha,i,c); } } } static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col) { typename Res::Scalar tmp(0); for(LhsInnerIterator it(lhsEval,i); it ;++it) tmp += it.value() * rhs.coeff(it.index(),col); res.coeffRef(i,col) += alpha * tmp; } }; // FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format? template<typename T1, typename T2/*, int _Options, typename _StrideType*/> struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> > { enum { Defined = 1 }; typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType; }; template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType> struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true> { typedef typename internal::remove_all<SparseLhsType>::type Lhs; typedef typename internal::remove_all<DenseRhsType>::type Rhs; typedef typename internal::remove_all<DenseResType>::type Res; typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { typename evaluator<Lhs>::type lhsEval(lhs); for(Index c=0; c<rhs.cols(); ++c) { for(Index j=0; j<lhs.outerSize(); ++j) { // typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c); typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c)); for(LhsInnerIterator it(lhsEval,j); it ;++it) res.coeffRef(it.index(),c) += it.value() * rhs_j; } } } }; 
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType> struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false> { typedef typename internal::remove_all<SparseLhsType>::type Lhs; typedef typename internal::remove_all<DenseRhsType>::type Rhs; typedef typename internal::remove_all<DenseResType>::type Res; typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { typename evaluator<Lhs>::type lhsEval(lhs); for(Index j=0; j<lhs.outerSize(); ++j) { typename Res::RowXpr res_j(res.row(j)); for(LhsInnerIterator it(lhsEval,j); it ;++it) res_j += (alpha*it.value()) * rhs.row(it.index()); } } }; template<typename SparseLhsType, typename DenseRhsType, typename DenseResType> struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false> { typedef typename internal::remove_all<SparseLhsType>::type Lhs; typedef typename internal::remove_all<DenseRhsType>::type Rhs; typedef typename internal::remove_all<DenseResType>::type Res; typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator; static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha) { typename evaluator<Lhs>::type lhsEval(lhs); for(Index j=0; j<lhs.outerSize(); ++j) { typename Rhs::ConstRowXpr rhs_j(rhs.row(j)); for(LhsInnerIterator it(lhsEval,j); it ;++it) res.row(it.index()) += (alpha*it.value()) * rhs_j; } } }; template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType> inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha) { sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha); } } // end namespace 
internal

namespace internal {

// Sparse * dense products plug into the generic product framework here.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};

// Sparse-triangular lhs reuses the plain sparse implementation.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
  : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};

// dense * sparse is computed by transposing the whole product:
// (dense * sparse)^T == sparse^T * dense^T.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);

    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
  : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};

// Evaluator for the outer product of a sparse vector with a dense vector.
// The result is iterated column by column (or row by row when transposed):
// each output column is the sparse vector scaled by one dense coefficient.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;

  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;

  typedef typename evaluator<ActualLhs>::type LhsEval;
  typedef typename evaluator<ActualRhs>::type RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;

public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = Dynamic
  };

  // Iterates over one output column: the sparse lhs entries scaled by m_factor,
  // the dense rhs coefficient for this outer index.
  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}

    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row()   const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col()   const { return NeedToTranspose ? LhsIterator::index() : m_outer; }

    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }

  protected:
    // Dense rhs: just read the coefficient.
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }
    // Sparse rhs: the factor is the single stored entry, if any; otherwise the
    // whole column is empty and the iterator reports exhaustion immediately.
    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }

    Index m_outer;
    bool m_empty;
    Scalar m_factor;
  };

  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {}

  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {}

protected:
  const LhsArg m_lhs;
  typename evaluator<ActualLhs>::nestedType m_lhsXprImpl;
  typename evaluator<ActualRhs>::nestedType m_rhsXprImpl;
};

// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}

};

template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}

};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEDENSEPRODUCT_H
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickWand Deprecated Methods % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "wand/studio.h" #include "wand/MagickWand.h" #include "wand/magick-wand-private.h" #include "wand/wand.h" #include "magick/monitor-private.h" #include "magick/thread-private.h" /* Define declarations. */ #define PixelViewId "PixelView" #define ThrowWandException(severity,tag,context) \ { \ (void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \ tag,"`%s'",context); \ return(MagickFalse); \ } /* Typedef declarations. 
*/ struct _PixelView { size_t id; char name[MaxTextExtent]; ExceptionInfo *exception; MagickWand *wand; CacheView *view; RectangleInfo region; size_t number_threads; PixelWand ***pixel_wands; MagickBooleanType debug; size_t signature; }; #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickAverageImages() average a set of images. % % The format of the MagickAverageImages method is: % % MagickWand *MagickAverageImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ static MagickWand *CloneMagickWandFromImages(const MagickWand *wand, Image *images) { MagickWand *clone_wand; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand)); if (clone_wand == (MagickWand *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", images->filename); (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand)); clone_wand->id=AcquireWandId(); (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g", MagickWandId,(double) clone_wand->id); clone_wand->exception=AcquireExceptionInfo(); InheritException(clone_wand->exception,wand->exception); clone_wand->image_info=CloneImageInfo(wand->image_info); clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info); clone_wand->images=images; clone_wand->debug=IsEventLogging(); if (clone_wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name); clone_wand->signature=WandSignature; return(clone_wand); } WandExport MagickWand *MagickAverageImages(MagickWand *wand) { Image *average_image; assert(wand != 
(MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); average_image=EvaluateImages(wand->images,MeanEvaluateOperator, wand->exception); if (average_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,average_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelView() makes a copy of the specified pixel view. % % The format of the ClonePixelView method is: % % PixelView *ClonePixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelView *ClonePixelView(const PixelView *pixel_view) { PixelView *clone_view; register ssize_t i; assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view)); if (clone_view == (PixelView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view)); clone_view->id=AcquireWandId(); (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g", PixelViewId,(double) clone_view->id); clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,pixel_view->exception); clone_view->view=CloneCacheView(pixel_view->view); clone_view->region=pixel_view->region; clone_view->number_threads=pixel_view->number_threads; for (i=0; i < (ssize_t) pixel_view->number_threads; i++) clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **) 
pixel_view->pixel_wands[i],pixel_view->region.width); clone_view->debug=pixel_view->debug; if (clone_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name); clone_view->signature=WandSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelView() deallocates memory associated with a pixel view. % % The format of the DestroyPixelView method is: % % PixelView *DestroyPixelView(PixelView *pixel_view, % const size_t number_wands,const size_t number_threads) % % A description of each parameter follows: % % o pixel_view: the pixel view. % % o number_wand: the number of pixel wands. % % o number_threads: number of threads. % */ static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands, const size_t number_wands,const size_t number_threads) { register ssize_t i; assert(pixel_wands != (PixelWand ***) NULL); for (i=0; i < (ssize_t) number_threads; i++) if (pixel_wands[i] != (PixelWand **) NULL) pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands); pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands); return(pixel_wands); } WandExport PixelView *DestroyPixelView(PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands, pixel_view->region.width,pixel_view->number_threads); pixel_view->view=DestroyCacheView(pixel_view->view); pixel_view->exception=DestroyExceptionInfo(pixel_view->exception); pixel_view->signature=(~WandSignature); RelinquishWandId(pixel_view->id); pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view); return(pixel_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e 
r P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferPixelViewIterator() iterates over three pixel views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel region is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination pixel view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferPixelViewIterator method is: % % MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source, % PixelView *duplex,PixelView *destination, % DuplexTransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o duplex: the duplex pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. 
% */ WandExport MagickBooleanType DuplexTransferPixelViewIterator( PixelView *source,PixelView *duplex,PixelView *destination, DuplexTransferPixelViewMethod transfer,void *context) { #define DuplexTransferPixelViewTag "PixelView/DuplexTransfer" ExceptionInfo *exception; Image *destination_image, *duplex_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (DuplexTransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; duplex_image=duplex->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *restrict duplex_indexes, *restrict indexes; register const PixelPacket *restrict duplex_pixels, *restrict pixels; register IndexPacket *restrict destination_indexes; register ssize_t x; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) 
PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y, duplex->region.width,1,duplex->exception); if (duplex_pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view); for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x); if (duplex_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetBlackQuantum(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); if (duplex_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) duplex->region.width; x++) PixelSetIndex(duplex->pixel_wands[id][x], GetPixelIndex(duplex_indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(destination_indexes+x)); if (transfer(source,duplex,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum( 
destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator) #endif proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag, progress++,source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status == MagickFalse ? 0 : 1); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewException() returns the severity, reason, and description of any % error that occurs when utilizing a pixel view. % % The format of the GetPixelViewException method is: % % char *GetPixelViewException(const PixelWand *pixel_view, % ExceptionType *severity) % % A description of each parameter follows: % % o pixel_view: the pixel pixel_view. % % o severity: the severity of the error is returned here. 
% */ WandExport char *GetPixelViewException(const PixelView *pixel_view, ExceptionType *severity) { char *description; assert(pixel_view != (const PixelView *) NULL); assert(pixel_view->signature == WandSignature); if (pixel_view->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name); assert(severity != (ExceptionType *) NULL); *severity=pixel_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent, sizeof(*description)); if (description == (char *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", pixel_view->name); *description='\0'; if (pixel_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->reason), MaxTextExtent); if (pixel_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MaxTextExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( pixel_view->exception->severity,pixel_view->exception->description), MaxTextExtent); (void) ConcatenateMagickString(description,")",MaxTextExtent); } return(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w H e i g h t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewHeight() returns the pixel view height. % % The format of the GetPixelViewHeight method is: % % size_t GetPixelViewHeight(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewHeight(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.height); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewIterator() iterates over the pixel view in parallel and calls % your get method for each scanline of the view. The pixel region is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetPixelViewIterator method is: % % MagickBooleanType GetPixelViewIterator(PixelView *source, % GetPixelViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o get: the get callback method. % % o context: the user defined context. 
% */ WandExport MagickBooleanType GetPixelViewIterator(PixelView *source, GetPixelViewMethod get,void *context) { #define GetPixelViewTag "PixelView/Get" Image *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (get == (GetPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *indexes; register const PixelPacket *pixels; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (get(source,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_GetPixelViewIterator) #endif proceed=SetImageProgress(source_image,GetPixelViewTag,progress++, source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status == MagickFalse ? 
0 : 1); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewPixels() returns the pixel view pixel_wands. % % The format of the GetPixelViewPixels method is: % % PixelWand *GetPixelViewPixels(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view) { const int id = GetOpenMPThreadId(); assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->pixel_wands[id]); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewWand() returns the magick wand associated with the pixel view. % % The format of the GetPixelViewWand method is: % % MagickWand *GetPixelViewWand(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w W i d t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewWidth() returns the pixel view width. % % The format of the GetPixelViewWidth method is: % % size_t GetPixelViewWidth(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport size_t GetPixelViewWidth(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w X % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewX() returns the pixel view x offset. % % The format of the GetPixelViewX method is: % % ssize_t GetPixelViewX(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewX(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.x); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l V i e w Y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelViewY() returns the pixel view y offset. % % The format of the GetPixelViewY method is: % % ssize_t GetPixelViewY(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. % */ WandExport ssize_t GetPixelViewY(const PixelView *pixel_view) { assert(pixel_view != (PixelView *) NULL); assert(pixel_view->signature == WandSignature); return(pixel_view->region.y); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P i x e l V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPixelView() returns MagickTrue if the the parameter is verified as a pixel % view container. % % The format of the IsPixelView method is: % % MagickBooleanType IsPixelView(const PixelView *pixel_view) % % A description of each parameter follows: % % o pixel_view: the pixel view. 
% */ WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view) { size_t length; if (pixel_view == (const PixelView *) NULL) return(MagickFalse); if (pixel_view->signature != WandSignature) return(MagickFalse); length=strlen(PixelViewId); if (LocaleNCompare(pixel_view->name,PixelViewId,length) != 0) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C l i p P a t h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickClipPathImage() clips along the named paths from the 8BIM profile, if % present. Later operations take effect inside the path. Id may be a number % if preceded with #, to work on a numbered path, e.g., "#1" to use the first % path. % % The format of the MagickClipPathImage method is: % % MagickBooleanType MagickClipPathImage(MagickWand *wand, % const char *pathname,const MagickBooleanType inside) % % A description of each parameter follows: % % o wand: the magick wand. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % */ WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand, const char *pathname,const MagickBooleanType inside) { return(MagickClipImagePath(wand,pathname,inside)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetFillAlpha() returns the alpha used when drawing using the fill % color or fill texture. Fully opaque is 1.0. 
% % The format of the DrawGetFillAlpha method is: % % double DrawGetFillAlpha(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport double DrawGetFillAlpha(const DrawingWand *wand) { return(DrawGetFillOpacity(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGetStrokeAlpha() returns the alpha of stroked object outlines. % % The format of the DrawGetStrokeAlpha method is: % % double DrawGetStrokeAlpha(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. */ WandExport double DrawGetStrokeAlpha(const DrawingWand *wand) { return(DrawGetStrokeOpacity(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P e e k G r a p h i c W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPeekGraphicWand() returns the current drawing wand. % % The format of the PeekDrawingWand method is: % % DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand) { return(PeekDrawingWand(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P o p G r a p h i c C o n t e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPopGraphicContext() destroys the current drawing wand and returns to the % previously pushed drawing wand. Multiple drawing wands may exist. It is an % error to attempt to pop more drawing wands than have been pushed, and it is % proper form to pop all drawing wands which have been pushed. 
% % The format of the DrawPopGraphicContext method is: % % MagickBooleanType DrawPopGraphicContext(DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport void DrawPopGraphicContext(DrawingWand *wand) { (void) PopDrawingWand(wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P u s h G r a p h i c C o n t e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPushGraphicContext() clones the current drawing wand to create a new % drawing wand. The original drawing wand(s) may be returned to by % invoking PopDrawingWand(). The drawing wands are stored on a drawing wand % stack. For every Pop there must have already been an equivalent Push. % % The format of the DrawPushGraphicContext method is: % % MagickBooleanType DrawPushGraphicContext(DrawingWand *wand) % % A description of each parameter follows: % % o wand: the drawing wand. % */ WandExport void DrawPushGraphicContext(DrawingWand *wand) { (void) PushDrawingWand(wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t F i l l A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetFillAlpha() sets the alpha to use when drawing using the fill % color or fill texture. Fully opaque is 1.0. % % The format of the DrawSetFillAlpha method is: % % void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. 
% % o fill_alpha: fill alpha % */ WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha) { DrawSetFillOpacity(wand,fill_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w S e t S t r o k e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawSetStrokeAlpha() specifies the alpha of stroked object outlines. % % The format of the DrawSetStrokeAlpha method is: % % void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) % % A description of each parameter follows: % % o wand: the drawing wand. % % o stroke_alpha: stroke alpha. The value 1.0 is opaque. % */ WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha) { DrawSetStrokeOpacity(wand,stroke_alpha); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k C o l o r F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickColorFloodfillImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % The format of the MagickColorFloodfillImage method is: % % MagickBooleanType MagickColorFloodfillImage(MagickWand *wand, % const PixelWand *fill,const double fuzz,const PixelWand *bordercolor, % const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o wand: the magick wand. % % o fill: the floodfill color pixel wand. % % o fuzz: By default target must match a particular pixel color % exactly. However, in many cases two colors may differ by a small amount. % The fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. 
For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now interpreted % as the same color for the purposes of the floodfill. % % o bordercolor: the border color pixel wand. % % o x,y: the starting location of the operation. % */ WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand, const PixelWand *fill,const double fuzz,const PixelWand *bordercolor, const ssize_t x,const ssize_t y) { DrawInfo *draw_info; MagickBooleanType status; PixelPacket target; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) ThrowWandException(WandError,"ContainsNoImages",wand->name); draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL); PixelGetQuantumColor(fill,&draw_info->fill); (void) GetOneVirtualPixel(wand->images,x % wand->images->columns, y % wand->images->rows,&target,wand->exception); if (bordercolor != (PixelWand *) NULL) PixelGetQuantumColor(bordercolor,&target); wand->images->fuzz=fuzz; status=ColorFloodfillImage(wand->images,draw_info,target,x,y, bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod); if (status == MagickFalse) InheritException(wand->exception,&wand->images->exception); draw_info=DestroyDrawInfo(draw_info); return(status == MagickFalse ? 0 : 1); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k D e s c r i b e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickDescribeImage() identifies an image by printing its attributes to the % file. Attributes include the image width, height, size, and others. % % The format of the MagickDescribeImage method is: % % const char *MagickDescribeImage(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. 
% */ WandExport char *MagickDescribeImage(MagickWand *wand) { return(MagickIdentifyImage(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k F l a t t e n I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickFlattenImages() merges a sequence of images. This useful for % combining Photoshop layers into a single image. % % The format of the MagickFlattenImages method is: % % MagickWand *MagickFlattenImages(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport MagickWand *MagickFlattenImages(MagickWand *wand) { Image *flatten_image; assert(wand != (MagickWand *) NULL); assert(wand->signature == WandSignature); if (wand->debug != MagickFalse) (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name); if (wand->images == (Image *) NULL) return((MagickWand *) NULL); flatten_image=FlattenImages(wand->images,wand->exception); if (flatten_image == (Image *) NULL) return((MagickWand *) NULL); return(CloneMagickWandFromImages(wand,flatten_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageAttribute() returns a value associated with the specified % property. Use MagickRelinquishMemory() to free the value when you are % finished with it. % % The format of the MagickGetImageAttribute method is: % % char *MagickGetImageAttribute(MagickWand *wand,const char *property) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. 
% */ WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property) { return(MagickGetImageProperty(wand,property)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k G e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageIndex() returns the index of the current image. % % The format of the MagickGetImageIndex method is: % % ssize_t MagickGetImageIndex(MagickWand *wand) % % A description of each parameter follows: % % o wand: the magick wand. % */ WandExport ssize_t MagickGetImageIndex(MagickWand *wand) { return(MagickGetIteratorIndex(wand)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k G e t I m a g e C h a n n e l E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageChannelExtrema() gets the extrema for one or more image % channels. % % The format of the MagickGetImageChannelExtrema method is: % % MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand, % const ChannelType channel,size_t *minima,size_t *maxima) % % A description of each parameter follows: % % o wand: the magick wand. % % o channel: the image channel(s). % % o minima: The minimum pixel value for the specified channel(s). % % o maxima: The maximum pixel value for the specified channel(s). 
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Delegate to the MagickCore statistics API on the current image. */
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   M a g i c k G e t I m a g e E x t r e m a                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageExtrema() gets the extrema for the image.
%
%  The format of the MagickGetImageExtrema method is:
%
%      MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
%        size_t *minima,size_t *maxima)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o minima: The minimum pixel value for the specified channel(s).
%
%    o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* Delegate to the MagickCore statistics API on the current image. */
  status=GetImageExtrema(wand->images,minima,maxima,wand->exception);
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e M a t t e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImageMatte() returns MagickTrue if the image has a matte channel
%  otherwise MagickFalse.
%
%  The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* The matte member of the current image is the matte-channel flag. */
  return(wand->images->matte);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k G e t I m a g e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickGetImagePixels() extracts pixel data from an image and returns it to
%  you.  The method returns MagickTrue on success otherwise MagickFalse if an
%  error is encountered.  The data is returned as char, short int, int,
%  ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to extract the first scanline of a 640x480 image as
%  character data in red-green-blue order:
%
%      MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickGetImagePixels method is:
%
%      MagickBooleanType MagickGetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
% % o x, y, columns, rows: These values define the perimeter % of a region of pixels you want to extract. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan, % Y = yellow, M = magenta, K = black, I = intensity (for grayscale), % P = pad. % % o storage: Define the data type of the pixels. Float and double types are % expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from % these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel, % LongPixel, QuantumPixel, or ShortPixel. % % o pixels: This array of values contain the pixel components as defined by % map and type. You must preallocate this array where the expected % length varies depending on the values of width, height, map, and type. % */ WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,const char *map,const StorageType storage, void *pixels) { return(MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k G e t I m a g e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickGetImageSize() returns the image length in bytes. % % The format of the MagickGetImageSize method is: % % MagickBooleanType MagickGetImageSize(MagickWand *wand, % MagickSizeType *length) % % A description of each parameter follows: % % o wand: the magick wand. % % o length: the image length in bytes. 
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* The blob size of the current image is the on-disk/encoded length. */
  return(GetBlobSize(wand->images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M a p I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMapImage() replaces the colors of an image with the closest color
%  from a reference image.
%
%  The format of the MagickMapImage method is:
%
%      MagickBooleanType MagickMapImage(MagickWand *wand,
%        const MagickWand *map_wand,const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o map: the map wand.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Both the target wand and the map wand must contain at least one image. */
  if ((wand->images == (Image *) NULL) ||
      (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M a t t e F l o o d f i l l I m a g e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMatteFloodfillImage() changes the transparency value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the transparency value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickMatteFloodfillImage method is:
%
%      MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
%        const double alpha,const double fuzz,const PixelWand *bordercolor,
%        const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: By default target must match a particular pixel color
%      exactly.  However, in many cases two colors may differ by a small
%      amount.  The fuzz member of image defines how much tolerance is
%      acceptable to consider two colors as the same.  For example, set fuzz
%      to 10 and the color red at intensities of 100 and 102 respectively are
%      now interpreted as the same color for the purposes of the floodfill.
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
% */
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Seed the target color from the pixel at (x,y); the coordinates are
    wrapped into the canvas so an out-of-range seed cannot read out of
    bounds.  When a border color is supplied it replaces the seeded target
    and the fill runs up to (not through) that border.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* alpha is caller-facing opacity (1.0 opaque); convert to quantum matte */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M e d i a n F i l t e r I m a g e                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMedianFilterImage() applies a digital filter that improves the quality
%  of a noisy image.  Each pixel is replaced by the median in a set of
%  neighboring pixels as defined by radius.
%
%  The format of the MagickMedianFilterImage method is:
%
%      MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
% */
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  Image
    *median_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  median_image=MedianFilterImage(wand->images,radius,wand->exception);
  if (median_image == (Image *) NULL)
    return(MagickFalse);
  /* swap the filtered result into the wand's image list in place */
  ReplaceImageInList(&wand->images,median_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M i n i m u m I m a g e s                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMinimumImages() returns the minimum intensity of an image sequence.
%
%  The format of the MagickMinimumImages method is:
%
%      MagickWand *MagickMinimumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  Image
    *minimum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* NOTE: returns NULL (rather than throwing) when the wand has no images */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  minimum_image=EvaluateImages(wand->images,MinEvaluateOperator,
    wand->exception);
  if (minimum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,minimum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M o d e I m a g e                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickModeImage() makes each pixel the 'predominant color' of the
%  neighborhood of the specified radius.
%
%  The format of the MagickModeImage method is:
%
%      MagickBooleanType MagickModeImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  Image
    *mode_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  mode_image=ModeImage(wand->images,radius,wand->exception);
  if (mode_image == (Image *) NULL)
    return(MagickFalse);
  /* swap the filtered result into the wand's image list in place */
  ReplaceImageInList(&wand->images,mode_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M o s a i c I m a g e s                                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMosaicImages() inlays an image sequence to form a single coherent
%  picture.  It returns a wand with each image in the sequence composited at
%  the location defined by the page offset of the image.
%
%  The format of the MagickMosaicImages method is:
%
%      MagickWand *MagickMosaicImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
% */
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  Image
    *mosaic_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* NOTE: returns NULL (rather than throwing) when the wand has no images */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  mosaic_image=MosaicImages(wand->images,wand->exception);
  if (mosaic_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,mosaic_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k O p a q u e I m a g e                                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickOpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  The format of the MagickOpaqueImage method is:
%
%      MagickBooleanType MagickOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: by default target must match a particular pixel color exactly.
%      However, in many cases two colors may differ by a small amount.  The
%      fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.
% */
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* deprecated alias: forwards to MagickPaintOpaqueImage() */
  return(MagickPaintOpaqueImage(wand,target,fill,fuzz));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k P a i n t F l o o d f i l l I m a g e                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintFloodfillImage() changes the color value of any pixel that
%  matches target and is an immediate neighbor.  If the method
%  FillToBorderMethod is specified, the color value is changed for any
%  neighbor pixel that does not match the bordercolor member of image.
%
%  The format of the MagickPaintFloodfillImage method is:
%
%      MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
%        const ChannelType channel,const PixelWand *fill,const double fuzz,
%        const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o fill: the floodfill color pixel wand.
%
%    o fuzz: the color-match tolerance (see MagickOpaqueImage).
%
%    o bordercolor: the border color pixel wand.
%
%    o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  MagickBooleanType
    status;

  /* deprecated alias: forwards to MagickFloodfillPaintImage(invert=false) */
  status=MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse);
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k P a i n t O p a q u e I m a g e                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintOpaqueImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintOpaqueImage method is:
%
%      MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
%        const PixelWand *target,const PixelWand *fill,const double fuzz)
%      MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
%        const ChannelType channel,const PixelWand *target,
%        const PixelWand *fill,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o channel: the channel(s).
%
%    o target: change this target color to the fill color within the image.
%
%    o fill: the fill pixel wand.
%
%    o fuzz: the color-match tolerance (see MagickOpaqueImage).
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* all-channels convenience form of the channel variant below */
  return(MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz));
}

WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  MagickBooleanType
    status;

  /* deprecated alias: forwards to MagickOpaquePaintImageChannel(invert=false) */
  status=MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse);
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k P a i n t T r a n s p a r e n t I m a g e                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickPaintTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickPaintTransparentImage method is:
%
%      MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: the color-match tolerance (see MagickOpaqueImage).
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /* deprecated alias: forwards to MagickTransparentPaintImage(invert=false) */
  return(MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k R e c o l o r I m a g e                                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRecolorImage() apply color transformation to an image.  The method
%  permits saturation changes, hue rotation, luminance to alpha, and various
%  other effects.  Although variable-sized transformation matrices can be
%  used, typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
%  (or RGBA with offsets).
%  The matrix is similar to those used by Adobe Flash except offsets are in
%  column 6 rather than 5 (in support of CMYKA images) and offsets are
%  normalized (divide Flash offset by 255).
%
%  The format of the MagickRecolorImage method is:
%
%      MagickBooleanType MagickRecolorImage(MagickWand *wand,
%        const size_t order,const double *color_matrix)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o order: the number of columns and rows in the color matrix.
%
%    o color_matrix: an array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *transform_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* a NULL matrix is rejected quietly, before the images check */
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  transform_image=RecolorImage(wand->images,order,color_matrix,
    wand->exception);
  if (transform_image == (Image *) NULL)
    return(MagickFalse);
  /* swap the transformed result into the wand's image list in place */
  ReplaceImageInList(&wand->images,transform_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k R e d u c e N o i s e I m a g e                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickReduceNoiseImage() smooths the contours of an image while still
%  preserving edge information.  The algorithm works by replacing each pixel
%  with its neighbor closest in value.  A neighbor is defined by radius.  Use
%  a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
%  The format of the MagickReduceNoiseImage method is:
%
%      MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
%        const double radius)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *noise_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  noise_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (noise_image == (Image *) NULL)
    return(MagickFalse);
  /* swap the filtered result into the wand's image list in place */
  ReplaceImageInList(&wand->images,noise_image);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k M a x i m u m I m a g e s                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMaximumImages() returns the maximum intensity of an image sequence.
%
%  The format of the MagickMaximumImages method is:
%
%      MagickWand *MagickMaximumImages(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *maximum_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* NOTE: returns NULL (rather than throwing) when the wand has no images */
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  maximum_image=EvaluateImages(wand->images,MaxEvaluateOperator,
    wand->exception);
  if (maximum_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,maximum_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k S e t I m a g e A t t r i b u t e                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageAttribute() associates a property with an image.
% % The format of the MagickSetImageAttribute method is: % % MagickBooleanType MagickSetImageAttribute(MagickWand *wand, % const char *property,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o property: the property. % % o value: the value. % */ WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand, const char *property,const char *value) { return(SetImageProperty(wand->images,property,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k S e t I m a g e I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageIndex() set the current image to the position of the list % specified with the index parameter. % % The format of the MagickSetImageIndex method is: % % MagickBooleanType MagickSetImageIndex(MagickWand *wand, % const ssize_t index) % % A description of each parameter follows: % % o wand: the magick wand. % % o index: the scene number. % */ WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand, const ssize_t index) { return(MagickSetIteratorIndex(wand,index)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a g i c k S e t I m a g e O p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSetImageOption() associates one or options with a particular image % format (.e.g MagickSetImageOption(wand,"jpeg","perserve","yes"). % % The format of the MagickSetImageOption method is: % % MagickBooleanType MagickSetImageOption(MagickWand *wand, % const char *format,const char *key,const char *value) % % A description of each parameter follows: % % o wand: the magick wand. % % o format: the image format. % % o key: The key. % % o value: The value. 
% */
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* compose the "format:key=value" option string expected by the core API */
  (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k T r a n s p a r e n t I m a g e                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickTransparentImage() changes any pixel that matches color with the
%  color defined by fill.
%
%  The format of the MagickTransparentImage method is:
%
%      MagickBooleanType MagickTransparentImage(MagickWand *wand,
%        const PixelWand *target,const double alpha,const double fuzz)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o target: change this target color to specified opacity value within
%      the image.
%
%    o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
%      transparent.
%
%    o fuzz: by default target must match a particular pixel color exactly.
%      However, in many cases two colors may differ by a small amount.  The
%      fuzz member of image defines how much tolerance is acceptable to
%      consider two colors as the same.
% */
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /* deprecated alias: forwards to MagickPaintTransparentImage() */
  return(MagickPaintTransparentImage(wand,target,alpha,fuzz));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k R e g i o n O f I n t e r e s t I m a g e                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickRegionOfInterestImage() extracts a region of the image and returns it
%  as a new wand.
%
%  The format of the MagickRegionOfInterestImage method is:
%
%      MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
%        const size_t width,const size_t height,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o width: the region width.
%
%    o height: the region height.
%
%    o x: the region x offset.
%
%    o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  /* deprecated alias: forwards to MagickGetImageRegion() */
  return(MagickGetImageRegion(wand,width,height,x,y));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k S e t I m a g e P i x e l s                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at the
%  location you specify.  The pixel data can be either char, short int, int,
%  ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to upload the first scanline of a 640x480 image from
%  character data in red-green-blue order:
%
%      MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
%  The format of the MagickSetImagePixels method is:
%
%      MagickBooleanType MagickSetImagePixels(MagickWand *wand,
%        const ssize_t x,const ssize_t y,const size_t columns,
%        const size_t rows,const char *map,const StorageType storage,
%        const void *pixels)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x, y, columns, rows: these values define the perimeter of a region
%      of pixels you want to define.
%
%    o map: this string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
%      Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
%      P = pad.
%
%    o storage: define the data type of the pixels.  Float and double types
%      are expected to be normalized [0..1] otherwise [0..QuantumRange].
%      Choose from these types: CharPixel, ShortPixel, IntegerPixel,
%      LongPixel, FloatPixel, or DoublePixel.
%
%    o pixels: this array of values contain the pixel components as defined
%      by map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  /* deprecated alias: forwards to MagickImportImagePixels() */
  return(MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   M a g i c k W r i t e I m a g e B l o b                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickWriteImageBlob() implements direct to memory image formats.
%  It returns the image as a blob and its length.  Use MagickSetFormat() to
%  set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
%  Use MagickRelinquishMemory() to free the blob when you are done with it.
%
%  The format of the MagickWriteImageBlob method is:
%
%      unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o length: the length of the blob.
%
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  /* deprecated alias: forwards to MagickGetImageBlob() */
  return(MagickGetImageBlob(wand,length));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   N e w P i x e l V i e w                                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelView() returns a pixel view required for all other methods in the
%  Pixel View API.
%
%  The format of the NewPixelView method is:
%
%      PixelView *NewPixelView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
% */

/*
  AcquirePixelsThreadSet() allocates one array of pixel wands per OpenMP
  thread so each thread in a view iterator has private scanline storage.
  On partial failure the set built so far is torn down via
  DestroyPixelsThreadSet() (defined elsewhere in this file) and NULL is
  returned.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* zero first so a partial-failure teardown only frees valid entries */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  /* NOTE(review): siblings assert WandSignature; this uses MagickSignature —
     confirm the two constants are interchangeable here */
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* wand must be recorded before the cache view is acquired from it */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  /* the view spans the whole canvas of the current image */
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   N e w P i x e l V i e w R e g i o n                                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewPixelViewRegion() returns a pixel view required for all other methods
%  in the Pixel View API.
%
%  The format of the NewPixelViewRegion method is:
%
%      PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,width,height: these values define the perimeter of a region of
%      pixel_wands view.
%
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  /* NOTE(review): siblings assert WandSignature; this uses MagickSignature —
     confirm the two constants are interchangeable here */
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Record the wand BEFORE acquiring the cache view: the original acquired
    the view from pixel_view->wand->images while pixel_view->wand was still
    NULL (zeroed by ResetMagickMemory above), dereferencing a null pointer.
    NewPixelView() already performs these steps in the correct order.
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   P i x e l G e t N e x t R o w                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelGetNextRow() returns the next row as an array of pixel wands from the
%  pixel iterator.
%
%  The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    number_wands;

  /* row length is discarded; callers needing it use the non-deprecated API */
  return(PixelGetNextIteratorRow(iterator,&number_wands));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   P i x e l I t e r a t o r G e t E x c e p t i o n                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PixelIteratorGetException() returns the severity, reason, and description of
%  any error that occurs when using other methods in this API.
%
%  The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  /* deprecated alias: forwards to PixelGetIteratorException() */
  return(PixelGetIteratorException(iterator,severity));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   S e t P i x e l V i e w I t e r a t o r                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelViewIterator() iterates over the pixel view in parallel and calls
%  your set method for each scanline of the view.  The pixel region is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.
%  The pixels are initially undefined and any settings you make in the
%  callback method are automagically synced back to your image.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetPixelViewIterator method is:
%
%      MagickBooleanType SetPixelViewIterator(PixelView *destination,
%        SetPixelViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the pixel view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag  "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* cooperative cancellation: once any iteration fails, the rest skip */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* invoke the user callback to fill this thread's row of pixel wands */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy the (per-thread) pixel wands back into the authentic pixels */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   T r a n s f e r P i x e l V i e w I t e r a t o r                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferPixelViewIterator() iterates over two pixel views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  region is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination pixel view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  Use this pragma:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
% % The format of the TransferPixelViewIterator method is: % % MagickBooleanType TransferPixelViewIterator(PixelView *source, % PixelView *destination,TransferPixelViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o destination: the destination pixel view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source, PixelView *destination,TransferPixelViewMethod transfer,void *context) { #define TransferPixelViewTag "PixelView/Transfer" ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(source != (PixelView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferPixelViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=source->region.y; y < (ssize_t) source->region.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict pixels; register IndexPacket *restrict destination_indexes; register ssize_t x; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y, source->region.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->region.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if 
(source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->region.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->region.x,y,destination->region.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->region.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (transfer(source,destination,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->region.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->region.width; x++) SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_TransferPixelViewIterator) #endif proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++, 
source->region.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status == MagickFalse ? 0 : 1); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e P i x e l V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdatePixelViewIterator() iterates over the pixel view in parallel and calls % your update method for each scanline of the view. The pixel region is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % Use this pragma: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdatePixelViewIterator method is: % % MagickBooleanType UpdatePixelViewIterator(PixelView *source, % UpdatePixelViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source pixel view. % % o update: the update callback method. % % o context: the user defined context. 
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag  "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
  /* One scanline per iteration: load the wands from the authentic pixels,
     run the user callback (which may modify the wands in place), then copy
     the wands back and sync the cache view. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}
#endif
blas_server_omp.c
/*********************************************************************/ /* Copyright 2009, 2010 The University of Texas at Austin. */ /* All rights reserved. */ /* */ /* Redistribution and use in source and binary forms, with or */ /* without modification, are permitted provided that the following */ /* conditions are met: */ /* */ /* 1. Redistributions of source code must retain the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer. */ /* */ /* 2. Redistributions in binary form must reproduce the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer in the documentation and/or other materials */ /* provided with the distribution. */ /* */ /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ /* */ /* The views and conclusions contained in the software and */ /* documentation are those of the authors and should not be */ /* interpreted as representing official policies, either expressed */ /* or implied, of The University of Texas at Austin. 
*/ /*********************************************************************/ #include <stdbool.h> #include <stdio.h> #include <stdlib.h> //#include <sys/mman.h> #include "common.h" #ifndef USE_OPENMP #include "blas_server.c" #else #ifndef OMP_SCHED #define OMP_SCHED static #endif int blas_server_avail = 0; static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER]; #ifdef HAVE_C11 static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER]; #else static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER]; #endif void goto_set_num_threads(int num_threads) { int i=0, j=0; if (num_threads < 1) num_threads = blas_num_threads; if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER; if (num_threads > blas_num_threads) { blas_num_threads = num_threads; } blas_cpu_number = num_threads; omp_set_num_threads(blas_cpu_number); //adjust buffer for each thread for(i=0; i<MAX_PARALLEL_NUMBER; i++) { for(j=0; j<blas_cpu_number; j++){ if(blas_thread_buffer[i][j]==NULL){ blas_thread_buffer[i][j]=blas_memory_alloc(2); } } for(; j<MAX_CPU_NUMBER; j++){ if(blas_thread_buffer[i][j]!=NULL){ blas_memory_free(blas_thread_buffer[i][j]); blas_thread_buffer[i][j]=NULL; } } } #if defined(ARCH_MIPS64) //set parameters for different number of threads. 
blas_set_parameter(); #endif } void openblas_set_num_threads(int num_threads) { goto_set_num_threads(num_threads); } int blas_thread_init(void){ int i=0, j=0; blas_get_cpu_number(); blas_server_avail = 1; for(i=0; i<MAX_PARALLEL_NUMBER; i++) { for(j=0; j<blas_num_threads; j++){ blas_thread_buffer[i][j]=blas_memory_alloc(2); } for(; j<MAX_CPU_NUMBER; j++){ blas_thread_buffer[i][j]=NULL; } } return 0; } int BLASFUNC(blas_thread_shutdown)(void){ int i=0, j=0; blas_server_avail = 0; for(i=0; i<MAX_PARALLEL_NUMBER; i++) { for(j=0; j<MAX_CPU_NUMBER; j++){ if(blas_thread_buffer[i][j]!=NULL){ blas_memory_free(blas_thread_buffer[i][j]); blas_thread_buffer[i][j]=NULL; } } } return 0; } static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ if (!(mode & BLAS_COMPLEX)){ #ifdef EXPRECISION if ((mode & BLAS_PREC) == BLAS_XDOUBLE){ /* REAL / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((xdouble *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else #endif if ((mode & BLAS_PREC) == BLAS_DOUBLE){ /* REAL / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((double *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else if ((mode & BLAS_PREC) == BLAS_SINGLE){ /* REAL / Single */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((float *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); #ifdef BUILD_HALF } else if ((mode & BLAS_PREC) == BLAS_BFLOAT16){ /* REAL / BFLOAT16 */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, bfloat16, bfloat16 *, 
BLASLONG, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((bfloat16 *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else if ((mode & BLAS_PREC) == BLAS_STOBF16){ /* REAL / BLAS_STOBF16 */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, bfloat16 *, BLASLONG, float *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((float *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else if ((mode & BLAS_PREC) == BLAS_DTOBF16){ /* REAL / BLAS_DTOBF16 */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, bfloat16 *, BLASLONG, double *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((double *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); #endif } else { /* REAL / Other types in future */ } } else { #ifdef EXPRECISION if ((mode & BLAS_PREC) == BLAS_XDOUBLE){ /* COMPLEX / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((xdouble *)args -> alpha)[0], ((xdouble *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else #endif if ((mode & BLAS_PREC) == BLAS_DOUBLE){ /* COMPLEX / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((double *)args -> alpha)[0], ((double *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else if ((mode & BLAS_PREC) == BLAS_SINGLE){ /* COMPLEX / Single */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, void *) = func; afunc(args -> m, args 
-> n, args -> k, ((float *)args -> alpha)[0], ((float *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else { /* COMPLEX / Other types in future */ } } } static void exec_threads(blas_queue_t *queue, int buf_index){ void *buffer, *sa, *sb; int pos=0, release_flag=0; buffer = NULL; sa = queue -> sa; sb = queue -> sb; #ifdef CONSISTENT_FPCSR __asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode)); __asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode)); #endif if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) { pos = omp_get_thread_num(); buffer = blas_thread_buffer[buf_index][pos]; //fallback if(buffer==NULL) { buffer = blas_memory_alloc(2); release_flag=1; } if (sa == NULL) { sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A); queue->sa=sa; } if (sb == NULL) { if (!(queue -> mode & BLAS_COMPLEX)){ #ifdef EXPRECISION if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){ sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE){ sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else { /* Other types in future */ } } else { #ifdef EXPRECISION if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){ sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) { sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float) + GEMM_ALIGN) & 
~GEMM_ALIGN)) + GEMM_OFFSET_B); } else { /* Other types in future */ } } queue->sb=sb; } } if (queue -> mode & BLAS_LEGACY) { legacy_exec(queue -> routine, queue -> mode, queue -> args, sb); } else if (queue -> mode & BLAS_PTHREAD) { void (*pthreadcompat)(void *) = queue -> routine; (pthreadcompat)(queue -> args); } else { int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine; (routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position); } if (release_flag) blas_memory_free(buffer); } int exec_blas(BLASLONG num, blas_queue_t *queue){ BLASLONG i, buf_index; if ((num <= 0) || (queue == NULL)) return 0; #ifdef CONSISTENT_FPCSR for (i = 0; i < num; i ++) { __asm__ __volatile__ ("fnstcw %0" : "=m" (queue[i].x87_mode)); __asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode)); } #endif while(true) { for(i=0; i < MAX_PARALLEL_NUMBER; i++) { #ifdef HAVE_C11 _Bool inuse = false; if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) { #else if(blas_buffer_inuse[i] == false) { blas_buffer_inuse[i] = true; #endif buf_index = i; break; } } if(i != MAX_PARALLEL_NUMBER) break; } #pragma omp parallel for num_threads(num) schedule(OMP_SCHED) for (i = 0; i < num; i ++) { #ifndef USE_SIMPLE_THREADED_LEVEL3 queue[i].position = i; #endif exec_threads(&queue[i], buf_index); } #ifdef HAVE_C11 atomic_store(&blas_buffer_inuse[buf_index], false); #else blas_buffer_inuse[buf_index] = false; #endif return 0; } #endif
GB_unaryop__minv_fp32_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp32_int32
// op(A') function:  GB_tran__minv_fp32_int32

// C type:   float
// A type:   int32_t
// cast:     float cij = (float) aij
// unaryop:  cij = (1.0F)/aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse in float.  Note that an input of
// zero casts to 0.0F and produces +Inf under IEEE-754 division; no error is
// raised.
#define GB_OP(z, x) \
    z = (1.0F)/x ;

// casting
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = 1/((float) aij) to each of the anz entries of Ax in parallel,
// writing the results to Cx.  Cx and Ax must not overlap.
GrB_Info GB_unop__minv_fp32_int32
(
    float *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which is specialized by the
// macros above (GB_PHASE_2_OF_2 selects the numeric phase).
GrB_Info GB_tran__minv_fp32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
reduction_teams.c
#include <stdio.h>
#include <omp.h>

#define N 1000000ll
#define SUM (N * (N-1)/2)

/* Report the host-side result of one run.  Increments *errors and prints a
   failure banner if either the host result is wrong or the device already
   flagged an error (gpu_error != 0). */
void checkHost(int gpu_error, int* errors, long long a){
  int host_error = 0;
  if (a != SUM){
    printf ("Host - Incorrect result = %lld, expected = %lld!\n", a, SUM);
    host_error = 1;
    (*errors)++;
  }
  if(!host_error && !gpu_error){
    printf("-----> Success\n");
  }
  else{
    printf("-----> Failure\n");
  }
}

/* Offload a sum-reduction of 0..N-1 to the device with the requested team and
   thread counts, then verify both the numeric result and that the runtime did
   not exceed the num_teams/thread_limit clauses.  *errors accumulates across
   calls. */
void reduction(int num_teams, int num_threads, int* errors){
  long long result = 0;
  int gpu_error = 0;
  int device_teams = 0;
  int device_threads = 0;

  /* BUG FIX: gpu_error must be mapped tofrom.  Scalars referenced inside a
     target construct are firstprivate by default (OpenMP 4.5+), so the
     device's "gpu_error = 1" was silently discarded and device failures were
     never reported on the host. */
  #pragma omp target teams num_teams(num_teams) thread_limit(num_threads) \
      map(tofrom: result, gpu_error) map(from: device_teams, device_threads)
  {
    long long a, i;
    a = 0;
    /* Each team runs the full reduction independently (no "distribute"),
       so every team should compute the complete SUM. */
    #pragma omp parallel for reduction(+:a)
    for (i = 0; i < N; i++) {
      a += i;
      device_threads = omp_get_num_threads();
    }
    result = a;
    if (a != SUM && omp_get_team_num() <= 50){ //limit teams that print
      printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM);
      gpu_error = 1;
    }
    device_teams = omp_get_num_teams();
  } //end of target

  // Spec says you cannot have more teams than num_teams clause
  if ( device_teams > num_teams ) {
    (*errors)++;
    gpu_error++;
    printf("ERROR: num_teams requested:%d actual teams on device:%d\n",
      num_teams, device_teams);
  }

  // Spec says you cannot have more threads than thread_limit clause
  if ( device_threads > num_threads ) {
    (*errors)++;
    gpu_error++;
    printf("ERROR: num_threads limit:%d Actual threads on device:%d\n",
      num_threads, device_threads);
  }

  checkHost(gpu_error, errors, result);
}

/* Exercise the reduction across several team/thread configurations and
   return nonzero if any run failed. */
int main (void)
{
  int errors = 0;

  printf("\n---------- Multiple Teams ----------\n");

  printf("\nRunning 2 Teams with 64 thread per team\n");
  reduction(2, 64, &errors);

  printf("\nRunning 2 Teams with 128 threads per team\n");
  reduction(2, 128, &errors);

  printf("\nRunning 2 Teams with 256 threads per team\n");
  reduction(2, 256, &errors);

  printf("\nRunning 256 Teams with 256 threads per team (Limited to print first 50 teams)\n");
  reduction(256, 256, &errors);

  printf("\nRunning 4096 Teams with 64 threads per team (Limited to print first 50 teams)\n");
  reduction(4096, 64, &errors);

  printf("\nRunning 4096 Teams with 256 threads per team (Limited to print first 50 teams)\n");
  reduction(4096, 256, &errors);

  if(!errors){
    printf("\nRESULT: ALL RUNS SUCCESSFUL!\n");
    return 0;
  }
  else{
    printf("\nRESULT: FAILURES OCCURED!\n");
    return -1;
  }
}
task-1.c
/* Conformance checks for OpenMP task data-sharing clauses.  Each function
   asserts (via abort) the exact values required by the private/shared/
   firstprivate semantics of the task construct. */

extern void abort (void);

int a = 18;

/* f1: inside the task, j and m are private (uninitialized copies the task
   itself assigns), k and n are shared, and a/i/l are captured firstprivate
   by default.  After taskwait, only the shared variables (k, n) and the
   global a may have changed in the enclosing scope. */
void
f1 (int i, int j, int k)
{
  int l = 6, m = 7, n = 8;
#pragma omp task private(j, m) shared(k, n)
  {
    j = 6;
    m = 5;
    if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
#pragma omp atomic
      k++;  /* k counts any semantic violation observed inside the task */
  }
#pragma omp taskwait
  if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
    abort ();
}

int v1 = 1, v2 = 2, v5 = 5;
int err;

/* f2: the sections construct privatizes v1 and firstprivatizes v2; the task
   inside the section captures v1, v2, v3, v4 firstprivate and shares the
   global v5, so only v5's increment is visible after taskwait. */
void
f2 (void)
{
  int v3 = 3;
#pragma omp sections private (v1) firstprivate (v2)
  {
#pragma omp section
    {
      int v4 = 4;
      v1 = 7;
#pragma omp task
      {
	if (++v1 != 8 || ++v2 != 3 || ++v3 != 4 || ++v4 != 5 || ++v5 != 6)
	  err = 1;
      }
#pragma omp taskwait
      if (v1 != 7 || v2 != 2 || v3 != 3 || v4 != 4 || v5 != 6)
	abort ();
      if (err)
	abort ();
    }
  }
}

/* f3: identical to f1 but with an untied task; the expected values must be
   unchanged by the untied clause. */
void
f3 (int i, int j, int k)
{
  int l = 6, m = 7, n = 8;
#pragma omp task private(j, m) shared(k, n) untied
  {
    j = 6;
    m = 5;
    if (++a != 19 || ++i != 9 || j != 6 || ++l != 7 || m != 5 || ++n != 9)
#pragma omp atomic
      k++;
  }
#pragma omp taskwait
  if (a != 19 || i != 8 || j != 26 || k != 0 || l != 6 || m != 7 || n != 9)
    abort ();
}

/* Run the checks both sequentially and from the master thread of a parallel
   region, resetting the global a between runs. */
int
main (void)
{
  f1 (8, 26, 0);
  f2 ();
  a = 18;
  f3 (8, 26, 0);
  a = 18;
#pragma omp parallel num_threads(4)
  {
#pragma omp master
    {
      f1 (8, 26, 0);
      a = 18;
      f3 (8, 26, 0);
    }
  }
  return 0;
}
gimplify.c
/* Tree lowering pass. This pass converts the GENERIC functions-as-trees tree representation into the GIMPLE form. Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. Major work done by Sebastian Pop <s.pop@laposte.net>, Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "varray.h" #include "tree-gimple.h" #include "tree-inline.h" #include "diagnostic.h" #include "langhooks.h" #include "langhooks-def.h" #include "tree-flow.h" #include "cgraph.h" #include "timevar.h" #include "except.h" #include "hashtab.h" #include "flags.h" #include "real.h" #include "function.h" #include "output.h" #include "expr.h" #include "ggc.h" #include "toplev.h" #include "target.h" #include "optabs.h" #include "pointer-set.h" enum gimplify_omp_var_data { GOVD_SEEN = 1, GOVD_EXPLICIT = 2, GOVD_SHARED = 4, GOVD_PRIVATE = 8, GOVD_FIRSTPRIVATE = 16, GOVD_LASTPRIVATE = 32, GOVD_REDUCTION = 64, GOVD_LOCAL = 128, GOVD_DEBUG_PRIVATE = 256, GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LOCAL) }; struct gimplify_omp_ctx { struct gimplify_omp_ctx *outer_context; splay_tree variables; struct 
pointer_set_t *privatized_types; location_t location; enum omp_clause_default_kind default_kind; bool is_parallel; bool is_combined_parallel; }; struct gimplify_ctx { struct gimplify_ctx *prev_context; tree current_bind_expr; tree temps; tree conditional_cleanups; tree exit_label; tree return_temp; VEC(tree,heap) *case_labels; /* The formal temporary table. Should this be persistent? */ htab_t temp_htab; int conditions; bool save_stack; bool into_ssa; }; static struct gimplify_ctx *gimplify_ctxp; static struct gimplify_omp_ctx *gimplify_omp_ctxp; /* Formal (expression) temporary table handling: Multiple occurrences of the same scalar expression are evaluated into the same temporary. */ typedef struct gimple_temp_hash_elt { tree val; /* Key */ tree temp; /* Value */ } elt_t; /* Forward declarations. */ static enum gimplify_status gimplify_compound_expr (tree *, tree *, bool); #ifdef ENABLE_CHECKING static bool cpt_same_type (tree a, tree b); #endif /* Return a hash value for a formal temporary table entry. */ static hashval_t gimple_tree_hash (const void *p) { tree t = ((const elt_t *) p)->val; return iterative_hash_expr (t, 0); } /* Compare two formal temporary table entries. */ static int gimple_tree_eq (const void *p1, const void *p2) { tree t1 = ((const elt_t *) p1)->val; tree t2 = ((const elt_t *) p2)->val; enum tree_code code = TREE_CODE (t1); if (TREE_CODE (t2) != code || TREE_TYPE (t1) != TREE_TYPE (t2)) return 0; if (!operand_equal_p (t1, t2, 0)) return 0; /* Only allow them to compare equal if they also hash equal; otherwise results are nondeterminate, and we fail bootstrap comparison. */ gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2)); return 1; } /* Set up a context for the gimplifier. 
*/

void
push_gimplify_context (void)
{
  struct gimplify_ctx *c;

  c = (struct gimplify_ctx *) xcalloc (1, sizeof (struct gimplify_ctx));
  c->prev_context = gimplify_ctxp;
  /* The formal-temporary hash table is only worth maintaining when
     optimizing; otherwise temporaries are not shared.  */
  if (optimize)
    c->temp_htab = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free);

  gimplify_ctxp = c;
}

/* Tear down a context for the gimplifier.  If BODY is non-null, then
   put the temporaries into the outer BIND_EXPR.  Otherwise, put them
   in the unexpanded_var_list.  */

void
pop_gimplify_context (tree body)
{
  struct gimplify_ctx *c = gimplify_ctxp;
  tree t;

  gcc_assert (c && !c->current_bind_expr);
  gimplify_ctxp = c->prev_context;

  /* Clear the formal-temp flag so later passes treat these as ordinary
     variables.  */
  for (t = c->temps; t ; t = TREE_CHAIN (t))
    DECL_GIMPLE_FORMAL_TEMP_P (t) = 0;

  if (body)
    declare_vars (c->temps, body, false);
  else
    record_vars (c->temps);

  if (optimize)
    htab_delete (c->temp_htab);

  free (c);
}

/* Push BIND onto the stack of enclosing BIND_EXPRs, chained through
   TREE_CHAIN.  */

static void
gimple_push_bind_expr (tree bind)
{
  TREE_CHAIN (bind) = gimplify_ctxp->current_bind_expr;
  gimplify_ctxp->current_bind_expr = bind;
}

/* Pop the innermost BIND_EXPR off the stack.  */

static void
gimple_pop_bind_expr (void)
{
  gimplify_ctxp->current_bind_expr
    = TREE_CHAIN (gimplify_ctxp->current_bind_expr);
}

/* Return the innermost enclosing BIND_EXPR, or NULL if none.  */

tree
gimple_current_bind_expr (void)
{
  return gimplify_ctxp->current_bind_expr;
}

/* Returns true iff there is a COND_EXPR between us and the innermost
   CLEANUP_POINT_EXPR.  This info is used by gimple_push_cleanup.  */

static bool
gimple_conditional_context (void)
{
  return gimplify_ctxp->conditions > 0;
}

/* Note that we've entered a COND_EXPR.  */

static void
gimple_push_condition (void)
{
#ifdef ENABLE_CHECKING
  if (gimplify_ctxp->conditions == 0)
    gcc_assert (!gimplify_ctxp->conditional_cleanups);
#endif
  ++(gimplify_ctxp->conditions);
}

/* Note that we've left a COND_EXPR.  If we're back at unconditional scope
   now, add any conditional cleanups we've seen to the prequeue.
*/ static void gimple_pop_condition (tree *pre_p) { int conds = --(gimplify_ctxp->conditions); gcc_assert (conds >= 0); if (conds == 0) { append_to_statement_list (gimplify_ctxp->conditional_cleanups, pre_p); gimplify_ctxp->conditional_cleanups = NULL_TREE; } } /* A stable comparison routine for use with splay trees and DECLs. */ static int splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb) { tree a = (tree) xa; tree b = (tree) xb; return DECL_UID (a) - DECL_UID (b); } /* Create a new omp construct that deals with variable remapping. */ static struct gimplify_omp_ctx * new_omp_context (bool is_parallel, bool is_combined_parallel) { struct gimplify_omp_ctx *c; c = XCNEW (struct gimplify_omp_ctx); c->outer_context = gimplify_omp_ctxp; c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0); c->privatized_types = pointer_set_create (); c->location = input_location; c->is_parallel = is_parallel; c->is_combined_parallel = is_combined_parallel; c->default_kind = OMP_CLAUSE_DEFAULT_SHARED; return c; } /* Destroy an omp construct that deals with variable remapping. */ static void delete_omp_context (struct gimplify_omp_ctx *c) { splay_tree_delete (c->variables); pointer_set_destroy (c->privatized_types); XDELETE (c); } static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int); static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool); /* A subroutine of append_to_statement_list{,_force}. T is not NULL. */ static void append_to_statement_list_1 (tree t, tree *list_p) { tree list = *list_p; tree_stmt_iterator i; if (!list) { if (t && TREE_CODE (t) == STATEMENT_LIST) { *list_p = t; return; } *list_p = list = alloc_stmt_list (); } i = tsi_last (list); tsi_link_after (&i, t, TSI_CONTINUE_LINKING); } /* Add T to the end of the list container pointed to by LIST_P. If T is an expression with no effects, it is ignored. 
*/

void
append_to_statement_list (tree t, tree *list_p)
{
  /* Side-effect-free statements would be dead code; drop them here.  */
  if (t && TREE_SIDE_EFFECTS (t))
    append_to_statement_list_1 (t, list_p);
}

/* Similar, but the statement is always added, regardless of side effects.  */

void
append_to_statement_list_force (tree t, tree *list_p)
{
  if (t != NULL_TREE)
    append_to_statement_list_1 (t, list_p);
}

/* Both gimplify the statement T and append it to LIST_P.  */

void
gimplify_and_add (tree t, tree *list_p)
{
  gimplify_stmt (&t);
  append_to_statement_list (t, list_p);
}

/* Strip off a legitimate source ending from the input string NAME of
   length LEN.  Rather than having to know the names used by all of
   our front ends, we strip off an ending of a period followed by
   up to five characters.  (Java uses ".class".)  */

static inline void
remove_suffix (char *name, int len)
{
  int i;

  /* Only look at the last two to seven characters, so e.g. ".class"
     (six characters) is caught but an early '.' is left alone.  */
  for (i = 2;  i < 8 && len > i;  i++)
    {
      if (name[len - i] == '.')
	{
	  name[len - i] = '\0';
	  break;
	}
    }
}

/* Create a nameless artificial label and put it in the current function
   context.  Returns the newly created label.  */

tree
create_artificial_label (void)
{
  tree lab = build_decl (LABEL_DECL, NULL_TREE, void_type_node);

  DECL_ARTIFICIAL (lab) = 1;
  DECL_IGNORED_P (lab) = 1;
  DECL_CONTEXT (lab) = current_function_decl;
  return lab;
}

/* Subroutine for find_single_pointer_decl.  Accumulates the pointer
   DECL seen so far in *(tree *)DATA.  */

static tree
find_single_pointer_decl_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
			    void *data)
{
  tree *pdecl = (tree *) data;

  if (DECL_P (*tp) && POINTER_TYPE_P (TREE_TYPE (*tp)))
    {
      if (*pdecl)
	{
	  /* We already found a pointer decl; return anything other
	     than NULL_TREE to unwind from walk_tree signalling that
	     we have a duplicate.  */
	  return *tp;
	}
      *pdecl = *tp;
    }

  return NULL_TREE;
}

/* Find the single DECL of pointer type in the tree T and return it.
   If there are zero or more than one such DECLs, return NULL.
*/

static tree
find_single_pointer_decl (tree t)
{
  tree decl = NULL_TREE;

  if (walk_tree (&t, find_single_pointer_decl_1, &decl, NULL))
    {
      /* find_single_pointer_decl_1 returns a nonzero value, causing
	 walk_tree to return a nonzero value, to indicate that it
	 found more than one pointer DECL.  */
      return NULL_TREE;
    }

  return decl;
}

/* Create a new temporary name with PREFIX.  Returns an identifier.  */

static GTY(()) unsigned int tmp_var_id_num;

tree
create_tmp_var_name (const char *prefix)
{
  char *tmp_name;

  if (prefix)
    {
      /* Work on a copy so the caller's string is not modified when
	 remove_suffix truncates it.  */
      char *preftmp = ASTRDUP (prefix);

      remove_suffix (preftmp, strlen (preftmp));
      prefix = preftmp;
    }

  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++);
  return get_identifier (tmp_name);
}

/* Create a new temporary variable declaration of type TYPE.
   Does NOT push it into the current binding.  */

tree
create_tmp_var_raw (tree type, const char *prefix)
{
  tree tmp_var;
  tree new_type;

  /* Make the type of the variable writable.  */
  new_type = build_type_variant (type, 0, 0);
  TYPE_ATTRIBUTES (new_type) = TYPE_ATTRIBUTES (type);

  /* NOTE(review): NEW_TYPE is computed above but the decl is built with
     TYPE; presumably building the unqualified variant is what matters
     here — confirm before "fixing".  */
  tmp_var = build_decl (VAR_DECL, prefix ? create_tmp_var_name (prefix) : NULL,
			type);

  /* The variable was declared by the compiler.  */
  DECL_ARTIFICIAL (tmp_var) = 1;
  /* And we don't want debug info for it.  */
  DECL_IGNORED_P (tmp_var) = 1;

  /* Make the variable writable.  */
  TREE_READONLY (tmp_var) = 0;

  DECL_EXTERNAL (tmp_var) = 0;
  TREE_STATIC (tmp_var) = 0;
  TREE_USED (tmp_var) = 1;

  return tmp_var;
}

/* Create a new temporary variable declaration of type TYPE.  DOES push
   the variable into the current binding.  Further, assume that this is
   called only from gimplification or optimization, at which point the
   creation of certain types are bugs.  */

tree
create_tmp_var (tree type, const char *prefix)
{
  tree tmp_var;

  /* We don't allow types that are addressable (meaning we can't make copies),
     or incomplete.  We also used to reject every variable size objects here,
     but now support those for which a constant upper bound can be obtained.
     The processing for variable sizes is performed in gimple_add_tmp_var,
     point at which it really matters and possibly reached via paths not going
     through this function, e.g. after direct calls to create_tmp_var_raw.  */
  gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type));

  tmp_var = create_tmp_var_raw (type, prefix);
  gimple_add_tmp_var (tmp_var);
  return tmp_var;
}

/* Given a tree, try to return a useful variable name that we can use
   to prefix a temporary that is being assigned the value of the tree.
   I.E. given  <temp> = &A, return A.  */

const char *
get_name (tree t)
{
  tree stripped_decl;

  stripped_decl = t;
  STRIP_NOPS (stripped_decl);
  if (DECL_P (stripped_decl) && DECL_NAME (stripped_decl))
    return IDENTIFIER_POINTER (DECL_NAME (stripped_decl));
  else
    {
      switch (TREE_CODE (stripped_decl))
	{
	case ADDR_EXPR:
	  /* Recurse through the address-of to name the object taken.  */
	  return get_name (TREE_OPERAND (stripped_decl, 0));
	  break;
	default:
	  return NULL;
	}
    }
}

/* Create a temporary with a name derived from VAL.  Subroutine of
   lookup_tmp_var; nobody else should call this function.  */

static inline tree
create_tmp_from_val (tree val)
{
  return create_tmp_var (TYPE_MAIN_VARIANT (TREE_TYPE (val)), get_name (val));
}

/* Create a temporary to hold the value of VAL.  If IS_FORMAL, try to reuse
   an existing expression temporary.  */

static tree
lookup_tmp_var (tree val, bool is_formal)
{
  tree ret;

  /* If not optimizing, never really reuse a temporary.  local-alloc
     won't allocate any variable that is used in more than one basic
     block, which means it will go into memory, causing much extra
     work in reload and final and poorer code generation, outweighing
     the extra memory allocation here.  */
  if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
    ret = create_tmp_from_val (val);
  else
    {
      elt_t elt, *elt_p;
      void **slot;

      /* Hash on VAL; create the temp on first sight, reuse thereafter.  */
      elt.val = val;
      slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT);
      if (*slot == NULL)
	{
	  elt_p = XNEW (elt_t);
	  elt_p->val = val;
	  elt_p->temp = ret = create_tmp_from_val (val);
	  *slot = (void *) elt_p;
	}
      else
	{
	  elt_p = (elt_t *) *slot;
	  ret = elt_p->temp;
	}
    }

  if (is_formal)
    DECL_GIMPLE_FORMAL_TEMP_P (ret) = 1;

  return ret;
}

/* Returns a formal temporary variable initialized with VAL.  PRE_P is as in
   gimplify_expr.  Only use this function if:

   1) The value of the unfactored expression represented by VAL will not
      change between the initialization and use of the temporary, and
   2) The temporary will not be otherwise modified.

   For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
   and #2 means it is inappropriate for && temps.

   For other cases, use get_initialized_tmp_var instead.  */

static tree
internal_get_tmp_var (tree val, tree *pre_p, tree *post_p, bool is_formal)
{
  tree t, mod;

  gimplify_expr (&val, pre_p, post_p, is_gimple_formal_tmp_rhs, fb_rvalue);

  t = lookup_tmp_var (val, is_formal);

  if (is_formal)
    {
      /* Propagate restrict-base information from a single pointer decl
	 in VAL to the temporary, so alias analysis keeps working.  */
      tree u = find_single_pointer_decl (val);

      if (u && TREE_CODE (u) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (u))
	u = DECL_GET_RESTRICT_BASE (u);
      if (u && TYPE_RESTRICT (TREE_TYPE (u)))
	{
	  if (DECL_BASED_ON_RESTRICT_P (t))
	    gcc_assert (u == DECL_GET_RESTRICT_BASE (t));
	  else
	    {
	      DECL_BASED_ON_RESTRICT_P (t) = 1;
	      SET_DECL_RESTRICT_BASE (t, u);
	    }
	}
    }

  if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE)
    DECL_COMPLEX_GIMPLE_REG_P (t) = 1;

  mod = build2 (INIT_EXPR, TREE_TYPE (t), t, val);

  if (EXPR_HAS_LOCATION (val))
    SET_EXPR_LOCUS (mod, EXPR_LOCUS (val));
  else
    SET_EXPR_LOCATION (mod, input_location);

  /* gimplify_modify_expr might want to reduce this further.  */
  gimplify_and_add (mod, pre_p);

  /* If we're gimplifying into ssa, gimplify_modify_expr will have
     given our temporary an ssa name.  Find and return it.  */
  if (gimplify_ctxp->into_ssa)
    t = TREE_OPERAND (mod, 0);

  return t;
}

/* Returns a formal temporary variable initialized with VAL.  PRE_P
   points to a statement list where side-effects needed to compute VAL
   should be stored.  */

tree
get_formal_tmp_var (tree val, tree *pre_p)
{
  return internal_get_tmp_var (val, pre_p, NULL, true);
}

/* Returns a temporary variable initialized with VAL.  PRE_P and POST_P
   are as in gimplify_expr.  */

tree
get_initialized_tmp_var (tree val, tree *pre_p, tree *post_p)
{
  return internal_get_tmp_var (val, pre_p, post_p, false);
}

/* Declares all the variables in VARS in SCOPE.  If DEBUG_INFO is true,
   generate debug info for them; otherwise don't.  */

void
declare_vars (tree vars, tree scope, bool debug_info)
{
  tree last = vars;
  if (last)
    {
      tree temps, block;

      /* C99 mode puts the default 'return 0;' for main outside the outer
	 braces.  So drill down until we find an actual scope.  */
      while (TREE_CODE (scope) == COMPOUND_EXPR)
	scope = TREE_OPERAND (scope, 0);

      gcc_assert (TREE_CODE (scope) == BIND_EXPR);

      temps = nreverse (last);

      block = BIND_EXPR_BLOCK (scope);
      if (!block || !debug_info)
	{
	  TREE_CHAIN (last) = BIND_EXPR_VARS (scope);
	  BIND_EXPR_VARS (scope) = temps;
	}
      else
	{
	  /* We need to attach the nodes both to the BIND_EXPR and to its
	     associated BLOCK for debugging purposes.  The key point here
	     is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
	     is a subchain of the BIND_EXPR_VARS of the BIND_EXPR.  */
	  if (BLOCK_VARS (block))
	    BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
	  else
	    {
	      BIND_EXPR_VARS (scope) = chainon (BIND_EXPR_VARS (scope), temps);
	      BLOCK_VARS (block) = temps;
	    }
	}
    }
}

/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
   for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly.  Abort if
   no such upper bound can be obtained.  */

static void
force_constant_size (tree var)
{
  /* The only attempt we make is by querying the maximum size of objects
     of the variable's type.  */
  HOST_WIDE_INT max_size;

  gcc_assert (TREE_CODE (var) == VAR_DECL);

  max_size = max_int_size_in_bytes (TREE_TYPE (var));

  gcc_assert (max_size >= 0);

  DECL_SIZE_UNIT (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
  DECL_SIZE (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}

/* Record TMP as a temporary of the current function: chain it into the
   gimplify context if one is active, otherwise fall back to cfun's var
   list or the function's outermost BIND_EXPR.  */

void
gimple_add_tmp_var (tree tmp)
{
  gcc_assert (!TREE_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!host_integerp (DECL_SIZE_UNIT (tmp), 1))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  if (gimplify_ctxp)
    {
      TREE_CHAIN (tmp) = gimplify_ctxp->temps;
      gimplify_ctxp->temps = tmp;

      /* Mark temporaries local within the nearest enclosing parallel.  */
      if (gimplify_omp_ctxp)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  while (ctx && !ctx->is_parallel)
	    ctx = ctx->outer_context;
	  if (ctx)
	    omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN);
	}
    }
  else if (cfun)
    record_vars (tmp);
  else
    declare_vars (tmp, DECL_SAVED_TREE (current_function_decl), false);
}

/* Determines whether to assign a locus to the statement STMT.  */

static bool
should_carry_locus_p (tree stmt)
{
  /* Don't emit a line note for a label.  We particularly don't want to
     emit one for the break label, since it doesn't actually correspond
     to the beginning of the loop/switch.  */
  if (TREE_CODE (stmt) == LABEL_EXPR)
    return false;

  /* Do not annotate empty statements, since it confuses gcov.  */
  if (!TREE_SIDE_EFFECTS (stmt))
    return false;

  return true;
}

/* Set the location of T to LOCUS if T doesn't already carry one and
   deserves one per should_carry_locus_p.  */

static void
annotate_one_with_locus (tree t, location_t locus)
{
  if (EXPR_P (t) && ! EXPR_HAS_LOCATION (t) && should_carry_locus_p (t))
    SET_EXPR_LOCATION (t, locus);
}

/* Annotate each statement in the list *STMT_P with location LOCUS.  */

void
annotate_all_with_locus (tree *stmt_p, location_t locus)
{
  tree_stmt_iterator i;

  if (!*stmt_p)
    return;

  for (i = tsi_start (*stmt_p); !tsi_end_p (i); tsi_next (&i))
    {
      tree t = tsi_stmt (i);

      /* Assuming we've already been gimplified, we shouldn't
	 see nested chaining constructs anymore.  */
      gcc_assert (TREE_CODE (t) != STATEMENT_LIST
		  && TREE_CODE (t) != COMPOUND_EXPR);

      annotate_one_with_locus (t, locus);
    }
}

/* Similar to copy_tree_r() but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that should only be done once.  If we
   were to unshare something like SAVE_EXPR(i++), the gimplification
   process would create wrong code.  */

static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);
  /* Don't unshare types, decls, constants and SAVE_EXPR nodes.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant
      || code == SAVE_EXPR || code == TARGET_EXPR
      /* We can't do anything sensible with a BLOCK used as an expression,
	 but we also can't just die when we see it because of non-expression
	 uses.  So just avert our eyes and cross our fingers.  Silly Java.  */
      || code == BLOCK)
    *walk_subtrees = 0;
  else
    {
      gcc_assert (code != BIND_EXPR);
      copy_tree_r (tp, walk_subtrees, data);
    }

  return NULL_TREE;
}

/* Callback for walk_tree to unshare most of the shared trees rooted at
   *TP.  If *TP has been visited already (i.e., TREE_VISITED (*TP) == 1),
   then *TP is deep copied by calling copy_tree_r.

   This unshares the same trees as copy_tree_r with the exception of
   SAVE_EXPR nodes.  These nodes model computations that should only be
   done once.  If we were to unshare something like SAVE_EXPR(i++), the
   gimplification process would create wrong code.
*/

static tree
copy_if_shared_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Skip types, decls, and constants.  But we do want to look at their
     types and the bounds of types.  Mark them as visited so we properly
     unmark their subtrees on the unmark pass.  If we've already seen them,
     don't look down further.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      if (TREE_VISITED (t))
	*walk_subtrees = 0;
      else
	TREE_VISITED (t) = 1;
    }

  /* If this node has been visited already, unshare it and don't look
     any deeper.  */
  else if (TREE_VISITED (t))
    {
      walk_tree (tp, mostly_copy_tree_r, NULL, NULL);
      *walk_subtrees = 0;
    }

  /* Otherwise, mark the tree as visited and keep looking.  */
  else
    TREE_VISITED (t) = 1;

  return NULL_TREE;
}

/* Walk_tree callback that clears TREE_VISITED on the subtree rooted at
   *TP, stopping the descent wherever the flag is already clear.  */

static tree
unmark_visited_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data ATTRIBUTE_UNUSED)
{
  if (TREE_VISITED (*tp))
    TREE_VISITED (*tp) = 0;
  else
    *walk_subtrees = 0;

  return NULL_TREE;
}

/* Unshare all the trees in BODY_P, a pointer into the body of FNDECL, and the
   bodies of any nested functions if we are unsharing the entire body of
   FNDECL.  */

static void
unshare_body (tree *body_p, tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node (fndecl);

  walk_tree (body_p, copy_if_shared_r, NULL, NULL);
  /* Only recurse into nested functions when processing the whole body,
     not an arbitrary sub-tree of it.  */
  if (body_p == &DECL_SAVED_TREE (fndecl))
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unshare_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}

/* Likewise, but mark all trees as not visited.  */

static void
unvisit_body (tree *body_p, tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node (fndecl);

  walk_tree (body_p, unmark_visited_r, NULL, NULL);
  if (body_p == &DECL_SAVED_TREE (fndecl))
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unvisit_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}

/* Unshare T and all the trees reached from T via TREE_CHAIN.
*/

static void
unshare_all_trees (tree t)
{
  walk_tree (&t, copy_if_shared_r, NULL, NULL);
  walk_tree (&t, unmark_visited_r, NULL, NULL);
}

/* Unconditionally make an unshared copy of EXPR.  This is used when using
   stored expressions which span multiple functions, such as BINFO_VTABLE,
   as the normal unsharing process can't tell that they're shared.  */

tree
unshare_expr (tree expr)
{
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  return expr;
}

/* A terser interface for building a representation of an exception
   specification.  */

tree
gimple_build_eh_filter (tree body, tree allowed, tree failure)
{
  tree t;

  /* FIXME should the allowed types go in TREE_TYPE?  */
  t = build2 (EH_FILTER_EXPR, void_type_node, allowed, NULL_TREE);
  append_to_statement_list (failure, &EH_FILTER_FAILURE (t));

  t = build2 (TRY_CATCH_EXPR, void_type_node, NULL_TREE, t);
  append_to_statement_list (body, &TREE_OPERAND (t, 0));

  return t;
}

/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Returns the temporary, or NULL_TREE if
   WRAPPER was already void.  */

tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;

      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;

	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;

	    case STATEMENT_LIST:
	      {
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		/* An empty statement list has no last statement to
		   descend into.  */
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;

	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to
		 void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR;
		   p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;

	    default:
	      goto out;
	    }
	}

    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.  */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}

      return temp;
    }

  return NULL_TREE;
}

/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
   a temporary through which they communicate.  */

static void
build_stack_save_restore (tree *save, tree *restore)
{
  tree save_call, tmp_var;

  save_call =
    build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_SAVE],
			      NULL_TREE);
  tmp_var = create_tmp_var (ptr_type_node, "saved_stack");

  *save = build2 (MODIFY_EXPR, ptr_type_node, tmp_var, save_call);
  *restore =
    build_function_call_expr (implicit_built_in_decls[BUILT_IN_STACK_RESTORE],
			      tree_cons (NULL_TREE, tmp_var, NULL_TREE));
}

/* Gimplify a BIND_EXPR.  Just voidify and recurse.  */

static enum gimplify_status
gimplify_bind_expr (tree *expr_p, tree *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  tree temp = voidify_wrapper_expr (bind_expr, NULL);

  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (t) == VAR_DECL)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;

	  /* Mark variable as local.  */
	  if (ctx && !is_global_var (t)
	      && (! DECL_SEEN_IN_BIND_EXPR_P (t)
		  || splay_tree_lookup (ctx->variables,
					(splay_tree_key) t) == NULL))
	    omp_add_variable (gimplify_omp_ctxp, t, GOVD_LOCAL | GOVD_SEEN);

	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;
	}

      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	  && !TREE_THIS_VOLATILE (t)
	  && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t))
	  && !needs_to_live_in_memory (t))
	DECL_COMPLEX_GIMPLE_REG_P (t) = 1;
    }

  gimple_push_bind_expr (bind_expr);
  /* Track whether the body performs a dynamic stack allocation; reset
     for this scope and restore the outer value below.  */
  gimplify_ctxp->save_stack = false;

  gimplify_to_stmt_list (&BIND_EXPR_BODY (bind_expr));

  if (gimplify_ctxp->save_stack)
    {
      tree stack_save, stack_restore;

      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  Note that mudflap depends on the
	 format of the emitted code: see mx_register_decls().  */
      build_stack_save_restore (&stack_save, &stack_restore);

      t = build2 (TRY_FINALLY_EXPR, void_type_node,
		  BIND_EXPR_BODY (bind_expr), NULL_TREE);
      append_to_statement_list (stack_restore, &TREE_OPERAND (t, 1));

      BIND_EXPR_BODY (bind_expr) = NULL_TREE;
      append_to_statement_list (stack_save, &BIND_EXPR_BODY (bind_expr));
      append_to_statement_list (t, &BIND_EXPR_BODY (bind_expr));
    }

  gimplify_ctxp->save_stack = old_save_stack;
  gimple_pop_bind_expr ();

  if (temp)
    {
      *expr_p = temp;
      append_to_statement_list (bind_expr, pre_p);
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}

/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.

   PRE_P points to the list where side effects that must happen before
   STMT should be stored.  */

static enum gimplify_status
gimplify_return_expr (tree stmt, tree *pre_p)
{
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;

  if (!ret_expr || TREE_CODE (ret_expr) == RESULT_DECL
      || ret_expr == error_mark_node)
    return GS_ALL_DONE;

  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else
    {
      result_decl = TREE_OPERAND (ret_expr, 0);
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	/* See through a return by reference.  */
	result_decl = TREE_OPERAND (result_decl, 0);

      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }

  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in expand_return
     to handle this case that does not exist in expand_expr.  */
  if (!result_decl
      || aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    result = result_decl;
  else if (gimplify_ctxp->return_temp)
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_var (TREE_TYPE (result_decl), NULL);

      /* ??? With complex control flow (usually involving abnormal edges),
	 we can wind up warning about an uninitialized value for this.  Due
	 to how this variable is constructed and initialized, this is never
	 true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;

      gimplify_ctxp->return_temp = result;
    }

  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;

  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);

  /* If we didn't use a temporary, then the result is just the result_decl.
     Otherwise we need a simple copy.  This should already be gimple.  */
  if (result == result_decl)
    ret_expr = result;
  else
    ret_expr = build2 (MODIFY_EXPR, TREE_TYPE (result), result_decl, result);
  TREE_OPERAND (stmt, 0) = ret_expr;

  return GS_ALL_DONE;
}

/* Gimplifies a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.
*/

static enum gimplify_status
gimplify_decl_expr (tree *stmt_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  if ((TREE_CODE (decl) == TYPE_DECL
       || TREE_CODE (decl) == VAR_DECL)
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    gimplify_type_sizes (TREE_TYPE (decl), stmt_p);

  if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);

      if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  /* This is a variable-sized decl.  Simplify its size and mark it
	     for deferred expansion.  Note that mudflap depends on the format
	     of the emitted code: see mx_register_decls().  */
	  tree t, args, addr, ptr_type;

	  gimplify_one_sizepos (&DECL_SIZE (decl), stmt_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), stmt_p);

	  /* All occurrences of this decl in final gimplified code will be
	     replaced by indirection.  Setting DECL_VALUE_EXPR does two
	     things: First, it lets the rest of the gimplifier know what
	     replacement to use.  Second, it lets the debug info know
	     where to find the value.  */
	  ptr_type = build_pointer_type (TREE_TYPE (decl));
	  addr = create_tmp_var (ptr_type, get_name (decl));
	  DECL_IGNORED_P (addr) = 0;
	  t = build_fold_indirect_ref (addr);
	  SET_DECL_VALUE_EXPR (decl, t);
	  DECL_HAS_VALUE_EXPR_P (decl) = 1;

	  /* Emit: addr = (ptr_type) __builtin_alloca (size).  */
	  args = tree_cons (NULL, DECL_SIZE_UNIT (decl), NULL);
	  t = built_in_decls[BUILT_IN_ALLOCA];
	  t = build_function_call_expr (t, args);
	  t = fold_convert (ptr_type, t);
	  t = build2 (MODIFY_EXPR, void_type_node, addr, t);

	  gimplify_and_add (t, stmt_p);

	  /* Indicate that we need to restore the stack level when the
	     enclosing BIND_EXPR is exited.  */
	  gimplify_ctxp->save_stack = true;
	}

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, stmt_p);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.  */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);
    }

  return GS_ALL_DONE;
}

/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */

static enum gimplify_status
gimplify_loop_expr (tree *expr_p, tree *pre_p)
{
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
  tree jump_stmt = build_and_jump (&LABEL_EXPR_LABEL (start_label));

  append_to_statement_list (start_label, pre_p);

  /* exit_label is created lazily by gimplify_exit_expr, so a non-null
     value afterwards means the body contained an EXIT_EXPR.  */
  gimplify_ctxp->exit_label = NULL_TREE;

  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);

  if (gimplify_ctxp->exit_label)
    {
      append_to_statement_list (jump_stmt, pre_p);
      *expr_p = build1 (LABEL_EXPR, void_type_node,
			gimplify_ctxp->exit_label);
    }
  else
    *expr_p = jump_stmt;

  gimplify_ctxp->exit_label = saved_label;

  return GS_ALL_DONE;
}

/* Compare two case labels.  Because the front end should already have
   made sure that case ranges do not overlap, it is enough to only compare
   the CASE_LOW values of each case label.  */

static int
compare_case_labels (const void *p1, const void *p2)
{
  tree case1 = *(tree *)p1;
  tree case2 = *(tree *)p2;

  return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
}

/* Sort the case labels in LABEL_VEC in place in ascending order.  */

void
sort_case_labels (tree label_vec)
{
  size_t len = TREE_VEC_LENGTH (label_vec);
  tree default_case = TREE_VEC_ELT (label_vec, len - 1);

  if (CASE_LOW (default_case))
    {
      size_t i;

      /* The last label in the vector should be the default case
	 but it is not.  Find it and swap it into the last slot.  */
      for (i = 0; i < len; ++i)
	{
	  tree t = TREE_VEC_ELT (label_vec, i);

	  if (!CASE_LOW (t))
	    {
	      default_case = t;
	      TREE_VEC_ELT (label_vec, i)
		= TREE_VEC_ELT (label_vec, len - 1);
	      TREE_VEC_ELT (label_vec, len - 1) = default_case;
	      break;
	    }
	}
    }

  /* Sort everything but the default label, which stays last.  */
  qsort (&TREE_VEC_ELT (label_vec, 0), len - 1, sizeof (tree),
	 compare_case_labels);
}

/* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can
   branch to.  */

static enum gimplify_status
gimplify_switch_expr (tree *expr_p, tree *pre_p)
{
  tree switch_expr = *expr_p;
  enum gimplify_status ret;

  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL,
		       is_gimple_val, fb_rvalue);

  if (SWITCH_BODY (switch_expr))
    {
      VEC(tree,heap) *labels, *saved_labels;
      tree label_vec, default_case = NULL_TREE;
      size_t i, len;

      /* If someone can be bothered to fill in the labels, they can be
	 bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));

      /* Collect the labels of this switch, preserving any labels
	 of an enclosing switch.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);

      gimplify_to_stmt_list (&SWITCH_BODY (switch_expr));

      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      i = 0;
      while (i < VEC_length (tree, labels))
	{
	  tree elt = VEC_index (tree, labels, i);
	  tree low = CASE_LOW (elt);
	  bool remove_element = FALSE;

	  if (low)
	    {
	      /* Discard empty ranges.  */
	      tree high = CASE_HIGH (elt);

	      if (high && INT_CST_LT (high, low))
		remove_element = TRUE;
	    }
	  else
	    {
	      /* The default case must be the last label in the list.  */
	      gcc_assert (!default_case);
	      default_case = elt;
	      remove_element = TRUE;
	    }

	  if (remove_element)
	    VEC_ordered_remove (tree, labels, i);
	  else
	    i++;
	}
      len = i;

      label_vec = make_tree_vec (len + 1);
      SWITCH_LABELS (*expr_p) = label_vec;
      append_to_statement_list (switch_expr, pre_p);

      if (! default_case)
	{
	  /* If the switch has no default label, add one, so that we jump
	     around the switch body.  */
	  default_case = build3 (CASE_LABEL_EXPR, void_type_node, NULL_TREE,
				 NULL_TREE, create_artificial_label ());
	  append_to_statement_list (SWITCH_BODY (switch_expr), pre_p);
	  *expr_p = build1 (LABEL_EXPR, void_type_node,
			    CASE_LABEL (default_case));
	}
      else
	*expr_p = SWITCH_BODY (switch_expr);

      for (i = 0; i < len; ++i)
	TREE_VEC_ELT (label_vec, i) = VEC_index (tree, labels, i);
      TREE_VEC_ELT (label_vec, len) = default_case;

      VEC_free (tree, heap, labels);

      sort_case_labels (label_vec);

      SWITCH_BODY (switch_expr) = NULL;
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));

  return ret;
}

/* Gimplify a CASE_LABEL_EXPR: record it in the innermost context that is
   collecting case labels and replace it with a plain LABEL_EXPR.  */

static enum gimplify_status
gimplify_case_label_expr (tree *expr_p)
{
  tree expr = *expr_p;
  struct gimplify_ctx *ctxp;

  /* Invalid OpenMP programs can play Duff's Device type games with
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification.  */
  for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
    if (ctxp->case_labels)
      break;

  VEC_safe_push (tree, heap, ctxp->case_labels, expr);
  *expr_p = build1 (LABEL_EXPR, void_type_node, CASE_LABEL (expr));
  return GS_ALL_DONE;
}

/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
   if necessary.  */

tree
build_and_jump (tree *label_p)
{
  if (label_p == NULL)
    /* If there's nowhere to jump, just fall through.  */
    return NULL_TREE;

  if (*label_p == NULL_TREE)
    {
      tree label = create_artificial_label ();

      *label_p = label;
    }

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}

/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
   This also involves building a label to jump to and communicating it to
   gimplify_loop_expr through gimplify_ctxp->exit_label.  */

static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree cond = TREE_OPERAND (*expr_p, 0);
  tree expr;

  expr = build_and_jump (&gimplify_ctxp->exit_label);
  expr = build3 (COND_EXPR, void_type_node, cond, expr, NULL_TREE);
  *expr_p = expr;
  return GS_OK;
}

/* A helper function to be called via walk_tree.
Mark all labels under *TP as being forced.  To be called for
   DECL_INITIAL of static variables.  */

tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (TREE_CODE (*tp) == LABEL_DECL)
    FORCED_LABEL (*tp) = 1;

  return NULL_TREE;
}

/* *EXPR_P is a COMPONENT_REF being used as an rvalue.  If its type is
   different from its canonical type, wrap the whole thing inside a
   NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
   type.

   The canonical type of a COMPONENT_REF is the type of the field being
   referenced--unless the field is a bit-field which can be read directly
   in a smaller mode, in which case the canonical type is the
   sign-appropriate type corresponding to that mode.  */

static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;

  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);

  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));

  if (TREE_TYPE (expr) != type)
    {
      tree old_type = TREE_TYPE (expr);

      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;

      /* And wrap the whole thing inside a NOP_EXPR.  */
      expr = build1 (NOP_EXPR, old_type, expr);

      *expr_p = expr;
    }
}

/* If a NOP conversion is changing a pointer to array of foo to a pointer
   to foo, embed that change in the ADDR_EXPR by converting
      T array[U];
      (T *)&array
   ==>
      &array[L]
   where L is the lower bound.  For simplicity, only do this for constant
   lower bound.  */

static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree ctype = TREE_TYPE (expr);
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree atype = TREE_TYPE (addr_expr);
  tree dctype, datype, ddatype, otype, obj_expr;

  /* Both cast and addr_expr types should be pointers.  */
  if (!POINTER_TYPE_P (ctype) || !POINTER_TYPE_P (atype))
    return;

  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (atype);
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;

  /* Both cast and addr_expr types should address the same object type.  */
  dctype = TREE_TYPE (ctype);
  ddatype = TREE_TYPE (datype);
  if (!lang_hooks.types_compatible_p (ddatype, dctype))
    return;

  /* The addr_expr and the object type should match.  */
  obj_expr = TREE_OPERAND (addr_expr, 0);
  otype = TREE_TYPE (obj_expr);
  if (!lang_hooks.types_compatible_p (otype, datype))
    return;

  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (dctype)
      || TREE_CODE (TYPE_SIZE_UNIT (dctype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;

  /* All checks succeeded.  Build a new node to merge the cast.  */
  *expr_p = build4 (ARRAY_REF, dctype, obj_expr,
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    NULL_TREE, NULL_TREE);
  *expr_p = build1 (ADDR_EXPR, ctype, *expr_p);
}

/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other conversions
   underneath as appropriate.  */

static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  gcc_assert (TREE_CODE (*expr_p) == NOP_EXPR
	      || TREE_CODE (*expr_p) == CONVERT_EXPR);

  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));

  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);

  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (TREE_CODE (*expr_p) == NOP_EXPR || TREE_CODE (*expr_p) == CONVERT_EXPR)
    {
      tree sub = TREE_OPERAND (*expr_p, 0);

      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));

      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }

  return GS_OK;
}

/* Gimplify a VAR_DECL or PARM_DECL.  Returns GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  */

static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;

  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (errorcount || sorrycount);
      return GS_ERROR;
    }

  /* When within an OpenMP context, notice uses of variables.  */
  if (gimplify_omp_ctxp
      && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;

  /* If the decl is an alias for another expression, substitute it now.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      *expr_p = unshare_expr (DECL_VALUE_EXPR (decl));
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node pointed to by EXPR_P.

      compound_lval
	      : min_lval '[' val ']'
	      | min_lval '.' ID
	      | compound_lval '[' val ']'
	      | compound_lval '.' ID

   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.
   Also, this way we don't run into problems with union aliasing; gcc
   requires that for accesses through a union to alias, the union
   reference must be explicit, which was not always the case when we
   were splitting up array and member refs.

   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
     *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_compound_lval (tree *expr_p, tree *pre_p,
			tree *post_p, fallback_t fallback)
{
  tree *p;
  VEC(tree,heap) *stack;
  enum gimplify_status ret = GS_OK, tret;
  int i;

  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  stack = VEC_alloc (tree, heap, 10);

  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref (*p);

      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs.  */
      else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;

      VEC_safe_push (tree, heap, stack, *p);
    }

  gcc_assert (VEC_length (tree, stack));

  /* Now STACK is a stack of pointers to all the refs we've walked through
     and P points to the innermost expression.

     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.

     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */

  /* Step 1: walk from the outermost ref inward, gimplifying the
     implicit low-bound / element-size / field-offset operands so that
     variable-sized components are annotated before the base is
     touched.  */
  for (i = VEC_length (tree, stack) - 1; i >= 0; i--)
    {
      tree t = VEC_index (tree, stack, i);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound and element type size and put them into
	     the ARRAY_REF.  If these values are set, they have already been
	     gimplified.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));
	      if (!is_gimple_min_invariant (low))
		{
		  TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }

	  if (!TREE_OPERAND (t, 3))
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
	      tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));

	      /* Divide the element size by the alignment of the element
		 type (above).  */
	      elmt_size = size_binop (EXACT_DIV_EXPR, elmt_size, factor);

	      if (!is_gimple_min_invariant (elmt_size))
		{
		  TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
	      tree factor
		= size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);

	      /* Divide the offset by its alignment.  */
	      offset = size_binop (EXACT_DIV_EXPR, offset, factor);

	      if (!is_gimple_min_invariant (offset))
		{
		  TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
    }

  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);

  /* And finally, the indices and operands to BIT_FIELD_REF.  During this
     loop we also remove any useless conversions.  */
  for (; VEC_length (tree, stack) > 0; )
    {
      tree t = VEC_pop (tree, stack);

      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.
	     Temporary fix for gcc.c-torture/execute/20040313-1.c.
	     Gimplify non-constant array indices into a temporary
	     variable.
	     FIXME - The real fix is to gimplify post-modify
	     expressions into a minimal gimple lvalue.  However, that
	     exposes bugs in alias analysis.  The alias analyzer does
	     not handle &PTR->FIELD very well.  Will fix after the
	     branch is merged into mainline (dnovillo 2004-05-03).  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_formal_tmp_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == BIT_FIELD_REF)
	{
	  tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	}

      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));

      /* The innermost expression P may have originally had
	 TREE_SIDE_EFFECTS set which would have caused all the outer
	 expressions in EXPR_P leading to P to also have had
	 TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }

  /* Re-gimplify the base now that the indices are in GIMPLE form;
     this time honor the caller's fallback exactly.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback);
  ret = MIN (ret, tret);

  /* If the outermost expression is a COMPONENT_REF, canonicalize its
     type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
      ret = MIN (ret, GS_OK);
    }

  VEC_free (tree, heap, stack);

  return ret;
}

/* Gimplify the self modifying expression pointed to by EXPR_P (++, --,
   +=, -=).
   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
     *EXPR_P should be stored.

   WANT_VALUE is nonzero iff we want to use the value of this expression
     in another expression.  */

static enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p,
			bool want_value)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1, post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;

  code = TREE_CODE (*expr_p);

  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
	      || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);

  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;

  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;

  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;

  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);

  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  */
  if (postfix)
    {
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs);
  t1 = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);

  if (postfix)
    {
      /* The update goes on the caller's post queue, after any post
	 side effects collected while gimplifying the operands; the
	 expression's value is the pre-modification LHS rvalue.  */
      gimplify_and_add (t1, orig_post_p);
      append_to_statement_list (post, orig_post_p);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = t1;
      return GS_OK;
    }
}

/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR.  */

static void
maybe_with_size_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  tree size;

  /* If we've already wrapped this or the type is error_mark_node, we can't do
     anything.  */
  if (TREE_CODE (expr) == WITH_SIZE_EXPR
      || type == error_mark_node)
    return;

  /* If the size isn't known or is a constant, we have nothing to do.  */
  size = TYPE_SIZE_UNIT (type);
  if (!size || TREE_CODE (size) == INTEGER_CST)
    return;

  /* Otherwise, make a WITH_SIZE_EXPR.  */
  size = unshare_expr (size);
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}

/* Subroutine of gimplify_call_expr:  Gimplify a single argument.  */

static enum gimplify_status
gimplify_arg (tree *expr_p, tree *pre_p)
{
  bool (*test) (tree);
  fallback_t fb;

  /* In general, we allow lvalues for function arguments to avoid
     extra overhead of copying large aggregates out of even larger
     aggregates into temporaries only to copy the temporaries to
     the argument list.  Make optimizers happy by pulling out to
     temporaries those types that fit in registers.  */
  if (is_gimple_reg_type (TREE_TYPE (*expr_p)))
    test = is_gimple_val, fb = fb_rvalue;
  else
    test = is_gimple_lvalue, fb = fb_either;

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* There is a sequence point before a function call.  Side effects in
     the argument list must occur before the actual call.  So, when
     gimplifying arguments, force gimplify_expr to use an internal
     post queue which is then appended to the end of PRE_P.  */
  return gimplify_expr (expr_p, pre_p, NULL, test, fb);
}

/* Gimplify the CALL_EXPR node pointed to by EXPR_P.
   PRE_P points to the list where side effects that must happen before
     *EXPR_P should be stored.
   WANT_VALUE is true if the result of the call is desired.  */

static enum gimplify_status
gimplify_call_expr (tree *expr_p, tree *pre_p, bool want_value)
{
  tree decl;
  tree arglist;
  enum gimplify_status ret;

  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);

  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);

  /* This may be a call to a builtin function.

     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.

     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  decl = get_callee_fndecl (*expr_p);
  if (decl && DECL_BUILT_IN (decl))
    {
      /* NOTE: this inner ARGLIST intentionally shadows the outer
	 declaration; both refer to operand 1 of the CALL_EXPR.  */
      tree arglist = TREE_OPERAND (*expr_p, 1);
      tree new = fold_builtin (decl, arglist, !want_value);

      if (new && new != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new;
	  return GS_OK;
	}

      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (decl) == BUILT_IN_VA_START)
	{
	  if (!arglist || !TREE_CHAIN (arglist))
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }

	  if (fold_builtin_next_arg (TREE_CHAIN (arglist)))
	    {
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }
	  /* Avoid gimplifying the second argument to va_start, which needs
	     to be the plain PARM_DECL.  */
	  return gimplify_arg (&TREE_VALUE (TREE_OPERAND (*expr_p, 1)),
			       pre_p);
	}
    }

  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);

  /* Gimplify arguments in push order: reverse, walk, reverse back.  */
  if (PUSH_ARGS_REVERSED)
    TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1));
  for (arglist = TREE_OPERAND (*expr_p, 1); arglist;
       arglist = TREE_CHAIN (arglist))
    {
      enum gimplify_status t;

      t = gimplify_arg (&TREE_VALUE (arglist), pre_p);

      if (t == GS_ERROR)
	ret = GS_ERROR;
    }
  if (PUSH_ARGS_REVERSED)
    TREE_OPERAND (*expr_p, 1) = nreverse (TREE_OPERAND (*expr_p, 1));

  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      decl = get_callee_fndecl (*expr_p);
      if (decl && DECL_BUILT_IN (decl))
	{
	  tree arglist = TREE_OPERAND (*expr_p, 1);
	  tree new = fold_builtin (decl, arglist, !want_value);

	  if (new && new != *expr_p)
	    {
	      /* There was a transformation of this call which computes the
		 same value, but in a more efficient way.  Return and try
		 again.  */
	      *expr_p = new;
	      return GS_OK;
	    }
	}
    }

  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS
     on its decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR
      && (call_expr_flags (*expr_p) & (ECF_CONST | ECF_PURE)))
    TREE_SIDE_EFFECTS (*expr_p) = 0;

  return ret;
}

/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
   rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.

   TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
   condition is true or false, respectively.  If null, we should generate
   our own to skip over the evaluation of this specific expression.

   This function is the tree equivalent of do_jump.

   shortcut_cond_r should only be called by shortcut_cond_expr.  */

static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;

  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.  */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      /* Turn if (a && b) into

	 if (a); else goto no;
	 if (b) goto yes; else goto no;
	 (no:) */
      if (false_label_p == NULL)
	false_label_p = &local_label;

      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p);
      append_to_statement_list (t, &expr);

      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
			   false_label_p);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      /* Turn if (a || b) into

	 if (a) goto yes;
	 if (b) goto yes; else goto no;
	 (yes:) */
      if (true_label_p == NULL)
	true_label_p = &local_label;

      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL);
      append_to_statement_list (t, &expr);

      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
			   false_label_p);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR)
    {
      /* As long as we're messing with gotos, turn if (a ?
 b : c) into
	 if (a)
	   if (b) goto yes; else goto no;
	 else
	   if (c) goto yes; else goto no;  */
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
		     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
				      false_label_p),
		     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
				      false_label_p));
    }
  else
    {
      expr = build3 (COND_EXPR, void_type_node, pred,
		     build_and_jump (true_label_p),
		     build_and_jump (false_label_p));
    }

  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}

/* Rewrite a COND_EXPR whose predicate uses && / || into straight-line
   COND_EXPRs, GOTO_EXPRs and LABEL_EXPRs with the same shortcut
   semantics.  Returns the (possibly new) expression; EXPR's operands
   may be modified in place.  */

static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);

  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn (a && b) into if (a) if (b).  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
	{
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  then_ = shortcut_cond_expr (expr);
	  then_se = then_ && TREE_SIDE_EFFECTS (then_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
	}
    }
  if (!then_se)
    {
      /* If there is no 'then', turn
	   if (a || b); else d
	 into
	   if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
	{
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  else_ = shortcut_cond_expr (expr);
	  else_se = else_ && TREE_SIDE_EFFECTS (else_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
	}
    }

  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;

  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */

  true_label = false_label = end_label = NULL_TREE;

  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */
  if (then_
      && TREE_CODE (then_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL)
    {
      true_label = GOTO_DESTINATION (then_);
      then_ = NULL;
      then_se = false;
    }
  if (else_
      && TREE_CODE (else_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL)
    {
      false_label = GOTO_DESTINATION (else_);
      else_ = NULL;
      else_se = false;
    }

  /* If we aren't hijacking a label for the 'then' branch, it falls
     through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;

  /* The 'else' branch also needs a label if it contains interesting
     code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;

  /* If there was nothing else in our arms, just forward the label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p);

  /* If our last subexpression already has a terminal label, reuse it.  */
  if (else_se)
    expr = expr_last (else_);
  else if (then_se)
    expr = expr_last (then_);
  else
    expr = NULL;
  if (expr && TREE_CODE (expr) == LABEL_EXPR)
    end_label = LABEL_EXPR_LABEL (expr);

  /* If we don't care about jumping to the 'else' branch, jump to the end
     if the condition is false.  */
  if (!false_label_p)
    false_label_p = &end_label;

  /* We only want to emit these labels if we aren't hijacking them.  */
  emit_end = (end_label == NULL_TREE);
  emit_false = (false_label == NULL_TREE);

  /* We only emit the jump over the else clause if we have to--if the
     then clause may fall through.  Otherwise we can wind up with a
     useless jump and a useless label at the end of gimplified code,
     which will cause us to think that this conditional as a whole
     falls through even if it doesn't.  If we then inline a function
     which ends with such a condition, that can cause us to issue an
     inappropriate warning about control reaching the end of a
     non-void function.  */
  jump_over_else = block_may_fallthru (then_);

  pred = shortcut_cond_r (pred, true_label_p, false_label_p);

  expr = NULL;
  append_to_statement_list (pred, &expr);

  append_to_statement_list (then_, &expr);
  if (else_se)
    {
      if (jump_over_else)
	{
	  t = build_and_jump (&end_label);
	  append_to_statement_list (t, &expr);
	}
      if (emit_false)
	{
	  t = build1 (LABEL_EXPR, void_type_node, false_label);
	  append_to_statement_list (t, &expr);
	}
      append_to_statement_list (else_, &expr);
    }
  if (emit_end && end_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, end_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}

/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE.  */

tree
gimple_boolify (tree expr)
{
  tree type = TREE_TYPE (expr);

  if (TREE_CODE (type) == BOOLEAN_TYPE)
    return expr;

  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Also boolify the arguments of truth exprs.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */

    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
      /* FALLTHRU */

    case EQ_EXPR: case NE_EXPR:
    case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR:
      /* These expressions always produce boolean results.  */
      TREE_TYPE (expr) = boolean_type_node;
      return expr;

    default:
      /* Other expressions that get here must have boolean values, but
	 might need to be converted to the appropriate mode.  */
      return fold_convert (boolean_type_node, expr);
    }
}

/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
    into

    if (p)			if (p)
      t1 = a;			  a;
    else		or	else
      t1 = b;			  b;
    t1;

    The second form is used when *EXPR_P is of type void.

    TARGET is the tree for T1 above.

    PRE_P points to the list where side effects that must happen before
      *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_cond_expr (tree *expr_p, tree *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree tmp, tmp2, type;
  enum gimplify_status ret;

  type = TREE_TYPE (expr);

  /* If this COND_EXPR has a value, copy the values into a temporary within
     the arms.  */
  if (! VOID_TYPE_P (type))
    {
      tree result;

      if ((fallback & fb_lvalue) == 0)
	{
	  result = tmp2 = tmp = create_tmp_var (TREE_TYPE (expr), "iftmp");
	  ret = GS_ALL_DONE;
	}
      else
	{
	  /* An lvalue result is produced by taking the address of each
	     arm, storing it into a pointer temporary, and dereferencing
	     that temporary afterwards.  */
	  tree type = build_pointer_type (TREE_TYPE (expr));

	  if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
	    TREE_OPERAND (expr, 1)
	      = build_fold_addr_expr (TREE_OPERAND (expr, 1));

	  if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
	    TREE_OPERAND (expr, 2)
	      = build_fold_addr_expr (TREE_OPERAND (expr, 2));

	  tmp2 = tmp = create_tmp_var (type, "iftmp");

	  expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (expr, 0),
			 TREE_OPERAND (expr, 1), TREE_OPERAND (expr, 2));

	  result = build_fold_indirect_ref (tmp);
	  ret = GS_ALL_DONE;
	}

      /* Build the then clause, 't1 = a;'.  But don't build an assignment
	 if this branch is void; in C++ it can be, if it's a throw.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
	TREE_OPERAND (expr, 1)
	  = build2 (MODIFY_EXPR, void_type_node, tmp,
		    TREE_OPERAND (expr, 1));

      /* Build the else clause, 't1 = b;'.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
	TREE_OPERAND (expr, 2)
	  = build2 (MODIFY_EXPR, void_type_node, tmp2,
		    TREE_OPERAND (expr, 2));

      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_and_add (expr, pre_p);

      *expr_p = result;
      return ret;
    }

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
	{
	  *expr_p = expr;

	  /* We can't rely on gimplify_expr to re-gimplify the expanded
	     form properly, as cleanups might cause the target labels to be
	     wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to set
	     up a conditional context.  */
	  gimple_push_condition ();
	  gimplify_stmt (expr_p);
	  gimple_pop_condition (pre_p);

	  return GS_ALL_DONE;
	}
    }

  /* Now do the normal gimplification.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
		       is_gimple_condexpr, fb_rvalue);

  gimple_push_condition ();

  gimplify_to_stmt_list (&TREE_OPERAND (expr, 1));
  gimplify_to_stmt_list (&TREE_OPERAND (expr, 2));
  recalculate_side_effects (expr);

  gimple_pop_condition (pre_p);

  if (ret == GS_ERROR)
    ;
  else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1)))
    ret = GS_ALL_DONE;
  else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 2)))
    /* Rewrite "if (a); else b" to "if (!a) b"  */
    {
      TREE_OPERAND (expr, 0) = invert_truthvalue (TREE_OPERAND (expr, 0));
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
			   is_gimple_condexpr, fb_rvalue);

      tmp = TREE_OPERAND (expr, 1);
      TREE_OPERAND (expr, 1) = TREE_OPERAND (expr, 2);
      TREE_OPERAND (expr, 2) = tmp;
    }
  else
    /* Both arms are empty; replace the COND_EXPR with its predicate.  */
    expr = TREE_OPERAND (expr, 0);

  *expr_p = expr;
  return ret;
}

/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memcpy.
*/ static enum gimplify_status gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value) { tree args, t, to, to_ptr, from; to = TREE_OPERAND (*expr_p, 0); from = TREE_OPERAND (*expr_p, 1); args = tree_cons (NULL, size, NULL); t = build_fold_addr_expr (from); args = tree_cons (NULL, t, args); to_ptr = build_fold_addr_expr (to); args = tree_cons (NULL, to_ptr, args); t = implicit_built_in_decls[BUILT_IN_MEMCPY]; t = build_function_call_expr (t, args); if (want_value) { t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t); t = build1 (INDIRECT_REF, TREE_TYPE (to), t); } *expr_p = t; return GS_OK; } /* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with a call to __builtin_memset. In this case we know that the RHS is a CONSTRUCTOR with an empty element list. */ static enum gimplify_status gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value) { tree args, t, to, to_ptr; to = TREE_OPERAND (*expr_p, 0); args = tree_cons (NULL, size, NULL); args = tree_cons (NULL, integer_zero_node, args); to_ptr = build_fold_addr_expr (to); args = tree_cons (NULL, to_ptr, args); t = implicit_built_in_decls[BUILT_IN_MEMSET]; t = build_function_call_expr (t, args); if (want_value) { t = build1 (NOP_EXPR, TREE_TYPE (to_ptr), t); t = build1 (INDIRECT_REF, TREE_TYPE (to), t); } *expr_p = t; return GS_OK; } /* A subroutine of gimplify_init_ctor_preeval. Called via walk_tree, determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an assignment. Returns non-null if we detect a potential overlap. */ struct gimplify_init_ctor_preeval_data { /* The base decl of the lhs object. May be NULL, in which case we have to assume the lhs is indirect. */ tree lhs_base_decl; /* The alias set of the lhs object. 
*/ int lhs_alias_set; }; static tree gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata) { struct gimplify_init_ctor_preeval_data *data = (struct gimplify_init_ctor_preeval_data *) xdata; tree t = *tp; /* If we find the base object, obviously we have overlap. */ if (data->lhs_base_decl == t) return t; /* If the constructor component is indirect, determine if we have a potential overlap with the lhs. The only bits of information we have to go on at this point are addressability and alias sets. */ if (TREE_CODE (t) == INDIRECT_REF && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl)) && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t))) return t; /* If the constructor component is a call, determine if it can hide a potential overlap with the lhs through an INDIRECT_REF like above. */ if (TREE_CODE (t) == CALL_EXPR) { tree type, fntype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0))); for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type)) if (POINTER_TYPE_P (TREE_VALUE (type)) && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl)) && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (TREE_TYPE (TREE_VALUE (type))))) return t; } if (IS_TYPE_OR_DECL_P (t)) *walk_subtrees = 0; return NULL; } /* A subroutine of gimplify_init_constructor. Pre-evaluate *EXPR_P, force values that overlap with the lhs (as described by *DATA) into temporaries. */ static void gimplify_init_ctor_preeval (tree *expr_p, tree *pre_p, tree *post_p, struct gimplify_init_ctor_preeval_data *data) { enum gimplify_status one; /* If the value is invariant, then there's nothing to pre-evaluate. But ensure it doesn't have any side-effects since a SAVE_EXPR is invariant but has side effects and might contain a reference to the object we're initializing. */ if (TREE_INVARIANT (*expr_p) && !TREE_SIDE_EFFECTS (*expr_p)) return; /* If the type has non-trivial constructors, we can't pre-evaluate. 
*/ if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p))) return; /* Recurse for nested constructors. */ if (TREE_CODE (*expr_p) == CONSTRUCTOR) { unsigned HOST_WIDE_INT ix; constructor_elt *ce; VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p); for (ix = 0; VEC_iterate (constructor_elt, v, ix, ce); ix++) gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data); return; } /* If this is a variable sized type, we must remember the size. */ maybe_with_size_expr (expr_p); /* Gimplify the constructor element to something appropriate for the rhs of a MODIFY_EXPR. Given that we know the lhs is an aggregate, we know the gimplifier will consider this a store to memory. Doing this gimplification now means that we won't have to deal with complicated language-specific trees, nor trees like SAVE_EXPR that can induce exponential search behavior. */ one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue); if (one == GS_ERROR) { *expr_p = NULL; return; } /* If we gimplified to a bare decl, we can be sure that it doesn't overlap with the lhs, since "a = { .x=a }" doesn't make sense. This will always be true for all scalars, since is_gimple_mem_rhs insists on a temporary variable for them. */ if (DECL_P (*expr_p)) return; /* If this is of variable size, we have no choice but to assume it doesn't overlap since we can't make a temporary for it. */ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST) return; /* Otherwise, we must search for overlap ... */ if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL)) return; /* ... and if found, force the value into a temporary. */ *expr_p = get_formal_tmp_var (*expr_p, pre_p); } /* A subroutine of gimplify_init_ctor_eval. Create a loop for a RANGE_EXPR in a CONSTRUCTOR for an array. 
var = lower;
   loop_entry:
     object[var] = value;
     if (var == upper)
       goto loop_exit;
     var = var + 1;
     goto loop_entry;
   loop_exit:

   We increment var _after_ the loop exit check because we might otherwise
   fail if upper == TYPE_MAX_VALUE (type for upper).

   Note that we never have to deal with SAVE_EXPRs here, because this has
   already been taken care of for us, in gimplify_init_ctor_preeval().  */

/* Forward declaration: gimplify_init_ctor_eval and
   gimplify_init_ctor_eval_range are mutually recursive (a RANGE_EXPR
   element may itself contain a CONSTRUCTOR, and vice versa).  */
static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *,
                                     tree *, bool);

static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
                               tree value, tree array_elt_type,
                               tree *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label;
  tree var, var_type, cref;

  loop_entry_label = create_artificial_label ();
  loop_exit_label = create_artificial_label ();

  /* Create and initialize the index variable.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type, NULL);
  append_to_statement_list (build2 (MODIFY_EXPR, var_type, var, lower), pre_p);

  /* Add the loop entry label.  */
  append_to_statement_list (build1 (LABEL_EXPR, void_type_node,
                                    loop_entry_label),
                            pre_p);

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
                 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */

  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
                             pre_p, cleared);
  else
    append_to_statement_list (build2 (MODIFY_EXPR, TREE_TYPE (cref),
                                      cref, value),
                              pre_p);

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_and_add (build3 (COND_EXPR, void_type_node,
                            build2 (EQ_EXPR, boolean_type_node,
                                    var, upper),
                            build1 (GOTO_EXPR,
                                    void_type_node,
                                    loop_exit_label),
                            NULL_TREE),
                    pre_p);

  /* Otherwise, increment the index var...  */
  append_to_statement_list (build2 (MODIFY_EXPR, var_type, var,
                                    build2 (PLUS_EXPR, var_type, var,
                                            fold_convert (var_type,
                                                          integer_one_node))),
                            pre_p);

  /* ...and jump back to the loop entry.  */
  append_to_statement_list (build1 (GOTO_EXPR,
                                    void_type_node,
                                    loop_entry_label),
                            pre_p);

  /* Add the loop exit label.  */
  append_to_statement_list (build1 (LABEL_EXPR,
                                    void_type_node,
                                    loop_exit_label),
                            pre_p);
}

/* Return true if FDECL is accessing a field that is zero sized.  */

static bool
zero_sized_field_decl (tree fdecl)
{
  if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
      && integer_zerop (DECL_SIZE (fdecl)))
    return true;
  return false;
}

/* Return true if TYPE is zero sized.  */

static bool
zero_sized_type (tree type)
{
  if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type)
      && integer_zerop (TYPE_SIZE (type)))
    return true;
  return false;
}

/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.  OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.  CLEARED is true if the entire LHS object has been zeroed
   first.  */

static void
gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
                         tree *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref, init;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
        continue;

      /* Elements already covered by the block clear can be skipped.  */
      if (cleared && initializer_zerop (value))
        continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
         so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
         happen with calls to functions returning a zero-sized type, which
         we shouldn't discard.  As a number of downstream passes don't
         expect sets of zero-sized fields, we rely on the gimplification of
         the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
        continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
         whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
        {
          tree lower = TREE_OPERAND (purpose, 0);
          tree upper = TREE_OPERAND (purpose, 1);

          /* If the lower bound is equal to upper, just treat it as if
             upper was the index.  */
          if (simple_cst_equal (lower, upper))
            purpose = upper;
          else
            {
              gimplify_init_ctor_eval_range (object, lower, upper, value,
                                             array_elt_type, pre_p, cleared);
              continue;
            }
        }

      if (array_elt_type)
        {
          cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
                         purpose, NULL_TREE, NULL_TREE);
        }
      else
        {
          gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
          cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
                         unshare_expr (object), purpose, NULL_TREE);
        }

      if (TREE_CODE (value) == CONSTRUCTOR
          && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
        gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
                                 pre_p, cleared);
      else
        {
          init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
          gimplify_and_add (init, pre_p);
        }
    }
}

/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have explicit
   initializers, so if not all elements are initialized we keep the
   original MODIFY_EXPR, we just remove all of the constructor elements.
*/

static enum gimplify_status
gimplify_init_constructor (tree *expr_p, tree *pre_p, tree *post_p,
                           bool want_value)
{
  tree object;
  tree ctor = TREE_OPERAND (*expr_p, 1);
  tree type = TREE_TYPE (ctor);
  enum gimplify_status ret;
  VEC(constructor_elt,gc) *elts;

  if (TREE_CODE (ctor) != CONSTRUCTOR)
    return GS_UNHANDLED;

  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
                       is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;
  object = TREE_OPERAND (*expr_p, 0);

  elts = CONSTRUCTOR_ELTS (ctor);

  ret = GS_ALL_DONE;
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
        struct gimplify_init_ctor_preeval_data preeval_data;
        HOST_WIDE_INT num_type_elements, num_ctor_elements;
        HOST_WIDE_INT num_nonzero_elements;
        bool cleared, valid_const_initializer;

        /* Aggregate types must lower constructors to initialization of
           individual elements.  The exception is that a CONSTRUCTOR node
           with no elements indicates zero-initialization of the whole.  */
        if (VEC_empty (constructor_elt, elts))
          break;

        /* Fetch information about the constructor to direct later
           processing.  We might want to make static versions of it in
           various cases, and can only do so if it known to be a valid
           constant initializer.  */
        valid_const_initializer
          = categorize_ctor_elements (ctor, &num_nonzero_elements,
                                      &num_ctor_elements, &cleared);

        /* If a const aggregate variable is being initialized, then it
           should never be a lose to promote the variable to be static.  */
        if (valid_const_initializer
            && num_nonzero_elements > 1
            && TREE_READONLY (object)
            && TREE_CODE (object) == VAR_DECL)
          {
            DECL_INITIAL (object) = ctor;
            TREE_STATIC (object) = 1;
            if (!DECL_NAME (object))
              DECL_NAME (object) = create_tmp_var_name ("C");
            walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

            /* ??? C++ doesn't automatically append a .<number> to the
               assembler name, and even when it does, it looks a FE private
               data structures to figure out what that number should be,
               which are not set for this variable.  I suppose this is
               important for local statics for inline functions, which
               aren't "local" in the object file sense.  So in order to get
               a unique TU-local symbol, we must invoke the lhd version
               now.  */
            lhd_set_decl_assembler_name (object);

            *expr_p = NULL_TREE;
            break;
          }

        /* If there are "lots" of initialized elements, even discounting
           those that are not address constants (and thus *must* be
           computed at runtime), then partition the constructor into
           constant and non-constant parts.  Block copy the constant
           parts in, then generate code for the non-constant parts.  */
        /* TODO.  There's code in cp/typeck.c to do this.  */

        num_type_elements = count_type_elements (type, true);

        /* If count_type_elements could not determine number of type
           elements for a constant-sized object, assume clearing is needed.
           Don't do this for variable-sized objects, as store_constructor
           will ignore the clearing of variable-sized objects.  */
        if (num_type_elements < 0 && int_size_in_bytes (type) >= 0)
          cleared = true;
        /* If there are "lots" of zeros, then block clear the object
           first.  */
        else if (num_type_elements - num_nonzero_elements > CLEAR_RATIO
                 && num_nonzero_elements < num_type_elements/4)
          cleared = true;
        /* ??? This bit ought not be needed.  For any element not present
           in the initializer, we should simply set them to zero.  Except
           we'd need to *find* the elements that are not present, and that
           requires trickery to avoid quadratic compile-time behavior in
           large cases or excessive memory use in small cases.  */
        else if (num_ctor_elements < num_type_elements)
          cleared = true;

        /* If there are "lots" of initialized elements, and all of them
           are valid address constants, then the entire initializer can
           be dropped to memory, and then memcpy'd out.  Don't do this
           for sparse arrays, though, as it's more efficient to follow
           the standard CONSTRUCTOR behavior of memset followed by
           individual element initialization.  */
        if (valid_const_initializer && !cleared)
          {
            HOST_WIDE_INT size = int_size_in_bytes (type);
            unsigned int align;

            /* ??? We can still get unbounded array types, at least
               from the C++ front end.  This seems wrong, but attempt
               to work around it for now.  */
            if (size < 0)
              {
                size = int_size_in_bytes (TREE_TYPE (object));
                if (size >= 0)
                  TREE_TYPE (ctor) = type = TREE_TYPE (object);
              }

            /* Find the maximum alignment we can assume for the object.  */
            /* ??? Make use of DECL_OFFSET_ALIGN.  */
            if (DECL_P (object))
              align = DECL_ALIGN (object);
            else
              align = TYPE_ALIGN (type);

            if (size > 0 && !can_move_by_pieces (size, align))
              {
                tree new = create_tmp_var_raw (type, "C");

                gimple_add_tmp_var (new);
                TREE_STATIC (new) = 1;
                TREE_READONLY (new) = 1;
                DECL_INITIAL (new) = ctor;
                if (align > DECL_ALIGN (new))
                  {
                    DECL_ALIGN (new) = align;
                    DECL_USER_ALIGN (new) = 1;
                  }
                walk_tree (&DECL_INITIAL (new), force_labels_r, NULL, NULL);

                TREE_OPERAND (*expr_p, 1) = new;

                /* This is no longer an assignment of a CONSTRUCTOR, but
                   we still may have processing to do on the LHS.  So
                   pretend we didn't do anything here to let that
                   happen.  */
                return GS_UNHANDLED;
              }
          }

        /* If there are nonzero elements, pre-evaluate to capture elements
           overlapping with the lhs into temporaries.  We must do this
           before clearing to fetch the values before they are
           zeroed-out.  */
        if (num_nonzero_elements > 0)
          {
            preeval_data.lhs_base_decl = get_base_address (object);
            if (!DECL_P (preeval_data.lhs_base_decl))
              preeval_data.lhs_base_decl = NULL;
            preeval_data.lhs_alias_set = get_alias_set (object);

            gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
                                        pre_p, post_p, &preeval_data);
          }

        if (cleared)
          {
            /* Zap the CONSTRUCTOR element list, which simplifies this
               case.  Note that we still have to gimplify, in order to
               handle the case of variable sized types.  Avoid shared tree
               structures.  */
            CONSTRUCTOR_ELTS (ctor) = NULL;
            object = unshare_expr (object);
            gimplify_stmt (expr_p);
            append_to_statement_list (*expr_p, pre_p);
          }

        /* If we have not block cleared the object, or if there are nonzero
           elements in the constructor, add assignments to the individual
           scalar fields of the object.  */
        if (!cleared || num_nonzero_elements > 0)
          gimplify_init_ctor_eval (object, elts, pre_p, cleared);

        *expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
        tree r, i;

        /* Extract the real and imaginary parts out of the ctor.  */
        gcc_assert (VEC_length (constructor_elt, elts) == 2);
        r = VEC_index (constructor_elt, elts, 0)->value;
        i = VEC_index (constructor_elt, elts, 1)->value;
        if (r == NULL || i == NULL)
          {
            tree zero = fold_convert (TREE_TYPE (type), integer_zero_node);
            if (r == NULL)
              r = zero;
            if (i == NULL)
              i = zero;
          }

        /* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
           represent creation of a complex value.  */
        if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
          {
            ctor = build_complex (type, r, i);
            TREE_OPERAND (*expr_p, 1) = ctor;
          }
        else
          {
            ctor = build2 (COMPLEX_EXPR, type, r, i);
            TREE_OPERAND (*expr_p, 1) = ctor;
            ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
                                 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
                                 fb_rvalue);
          }
      }
      break;

    case VECTOR_TYPE:
      {
        unsigned HOST_WIDE_INT ix;
        constructor_elt *ce;

        /* Go ahead and simplify constant constructors to VECTOR_CST.  */
        if (TREE_CONSTANT (ctor))
          {
            bool constant_p = true;
            tree value;

            /* Even when ctor is constant, it might contain non-*_CST
               elements (e.g. { 1.0/0.0 - 1.0/0.0, 0.0 }) and those don't
               belong into VECTOR_CST nodes.  */
            FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
              if (!CONSTANT_CLASS_P (value))
                {
                  constant_p = false;
                  break;
                }

            if (constant_p)
              {
                TREE_OPERAND (*expr_p, 1)
                  = build_vector_from_ctor (type, elts);
                break;
              }

            /* Don't reduce a TREE_CONSTANT vector ctor even if we can't
               make a VECTOR_CST.  It won't do anything for us, and it'll
               prevent us from representing it as a single constant.  */
            break;
          }

        /* Vector types use CONSTRUCTOR all the way through gimple
           compilation as a general initializer.  */
        for (ix = 0; VEC_iterate (constructor_elt, elts, ix, ce); ix++)
          {
            enum gimplify_status tret;
            tret = gimplify_expr (&ce->value, pre_p, post_p,
                                  is_gimple_val, fb_rvalue);
            if (tret == GS_ERROR)
              ret = GS_ERROR;
          }
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  else if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = object;
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}

/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */

static tree
fold_indirect_ref_rhs (tree t)
{
  tree type = TREE_TYPE (TREE_TYPE (t));
  tree sub = t;
  tree subtype;

  STRIP_USELESS_TYPE_CONVERSION (sub);
  subtype = TREE_TYPE (sub);
  if (!POINTER_TYPE_P (subtype))
    return NULL_TREE;

  if (TREE_CODE (sub) == ADDR_EXPR)
    {
      tree op = TREE_OPERAND (sub, 0);
      tree optype = TREE_TYPE (op);
      /* *&p => p */
      if (lang_hooks.types_compatible_p (type, optype))
        return op;
      /* *(foo *)&fooarray => fooarray[0] */
      else if (TREE_CODE (optype) == ARRAY_TYPE
               && lang_hooks.types_compatible_p (type, TREE_TYPE (optype)))
        {
          tree type_domain = TYPE_DOMAIN (optype);
          tree min_val = size_zero_node;
          if (type_domain && TYPE_MIN_VALUE (type_domain))
            min_val = TYPE_MIN_VALUE (type_domain);
          return build4 (ARRAY_REF, type, op, min_val,
                         NULL_TREE, NULL_TREE);
        }
    }

  /* *(foo *)fooarrptr => (*fooarrptr)[0] */
  if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
      && lang_hooks.types_compatible_p (type,
                                        TREE_TYPE (TREE_TYPE (subtype))))
    {
      tree type_domain;
      tree min_val = size_zero_node;
      tree osub = sub;
      /* Recurse on the pointer operand; fall back to an explicit
         INDIRECT_REF if it cannot itself be simplified.  */
      sub = fold_indirect_ref_rhs (sub);
      if (! sub)
        sub = build1 (INDIRECT_REF, TREE_TYPE (subtype), osub);
      type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
      if (type_domain && TYPE_MIN_VALUE (type_domain))
        min_val = TYPE_MIN_VALUE (type_domain);
      return build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE);
    }

  return NULL_TREE;
}

/* Subroutine of gimplify_modify_expr to do simplifications of
   MODIFY_EXPRs based on the code of the RHS.  We loop for as long as
   something changes.  */

static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
                          tree *pre_p, tree *post_p, bool want_value)
{
  enum gimplify_status ret = GS_OK;

  /* Keep re-dispatching on the (possibly rewritten) RHS code until some
     case reports GS_UNHANDLED or returns directly.  */
  while (ret != GS_UNHANDLED)
    switch (TREE_CODE (*from_p))
      {
      case INDIRECT_REF:
        {
          /* If we have code like

                *(const A*)(A*)&x

             where the type of "x" is a (possibly cv-qualified variant
             of "A"), treat the entire expression as identical to "x".
             This kind of code arises in C++ when an object is bound
             to a const reference, and if "x" is a TARGET_EXPR we want
             to take advantage of the optimization below.  */
          tree t = fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
          if (t)
            {
              *from_p = t;
              ret = GS_OK;
            }
          else
            ret = GS_UNHANDLED;
          break;
        }

      case TARGET_EXPR:
        {
          /* If we are initializing something from a TARGET_EXPR, strip
             the TARGET_EXPR and initialize it directly, if possible.
             This can't be done if the initializer is void, since that
             implies that the temporary is set in some non-trivial way.

             ??? What about code that pulls out the temp and uses it
             elsewhere? I think that such code never uses the TARGET_EXPR
             as an initializer.  If I'm wrong, we'll die because the temp
             won't have any RTL.  In that case, I guess we'll need to
             replace references somehow.  */
          tree init = TARGET_EXPR_INITIAL (*from_p);

          if (!VOID_TYPE_P (TREE_TYPE (init)))
            {
              *from_p = init;
              ret = GS_OK;
            }
          else
            ret = GS_UNHANDLED;
        }
        break;

      case COMPOUND_EXPR:
        /* Remove any COMPOUND_EXPR in the RHS so the following cases will
           be caught.  */
        gimplify_compound_expr (from_p, pre_p, true);
        ret = GS_OK;
        break;

      case CONSTRUCTOR:
        /* If we're initializing from a CONSTRUCTOR, break this into
           individual MODIFY_EXPRs.  */
        return gimplify_init_constructor (expr_p, pre_p, post_p,
                                          want_value);

      case COND_EXPR:
        /* If we're assigning to a non-register type, push the assignment
           down into the branches.  This is mandatory for ADDRESSABLE
           types, since we cannot generate temporaries for such, but it
           saves a copy in other cases as well.  */
        if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
          {
            /* This code should mirror the code in gimplify_cond_expr.  */
            enum tree_code code = TREE_CODE (*expr_p);
            tree cond = *from_p;
            tree result = *to_p;

            ret = gimplify_expr (&result, pre_p, post_p,
                                 is_gimple_min_lval, fb_lvalue);
            if (ret != GS_ERROR)
              ret = GS_OK;

            if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
              TREE_OPERAND (cond, 1)
                = build2 (code, void_type_node, result,
                          TREE_OPERAND (cond, 1));
            if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
              TREE_OPERAND (cond, 2)
                = build2 (code, void_type_node, unshare_expr (result),
                          TREE_OPERAND (cond, 2));

            TREE_TYPE (cond) = void_type_node;
            recalculate_side_effects (cond);

            if (want_value)
              {
                gimplify_and_add (cond, pre_p);
                *expr_p = unshare_expr (result);
              }
            else
              *expr_p = cond;

            return ret;
          }
        else
          ret = GS_UNHANDLED;
        break;

      case CALL_EXPR:
        /* For calls that return in memory, give *to_p as the CALL_EXPR's
           return slot so that we don't generate a temporary.  */
        if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
            && aggregate_value_p (*from_p, *from_p))
          {
            bool use_target;

            if (!(rhs_predicate_for (*to_p))(*from_p))
              /* If we need a temporary, *to_p isn't accurate.  */
              use_target = false;
            else if (TREE_CODE (*to_p) == RESULT_DECL
                     && DECL_NAME (*to_p) == NULL_TREE
                     && needs_to_live_in_memory (*to_p))
              /* It's OK to use the return slot directly unless it's an
                 NRV.  */
              use_target = true;
            else if (is_gimple_reg_type (TREE_TYPE (*to_p))
                     || (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
              /* Don't force regs into memory.  */
              use_target = false;
            else if (TREE_CODE (*to_p) == VAR_DECL
                     && DECL_GIMPLE_FORMAL_TEMP_P (*to_p))
              /* Don't use the original target if it's a formal temp; we
                 don't want to take their addresses.  */
              use_target = false;
            else if (TREE_CODE (*expr_p) == INIT_EXPR)
              /* It's OK to use the target directly if it's being
                 initialized.  */
              use_target = true;
            else if (!is_gimple_non_addressable (*to_p))
              /* Don't use the original target if it's already
                 addressable; if its address escapes, and the called
                 function uses the NRV optimization, a conforming program
                 could see *to_p change before the called function
                 returns; see c++/19317.  When optimizing, the return_slot
                 pass marks more functions as safe after we have escape
                 info.  */
              use_target = false;
            else
              use_target = true;

            if (use_target)
              {
                CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
                lang_hooks.mark_addressable (*to_p);
              }
          }

        ret = GS_UNHANDLED;
        break;

        /* If we're initializing from a container, push the initialization
           inside it.  */
      case CLEANUP_POINT_EXPR:
      case BIND_EXPR:
      case STATEMENT_LIST:
        {
          tree wrap = *from_p;
          tree t;

          ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
                               fb_lvalue);
          if (ret != GS_ERROR)
            ret = GS_OK;

          t = voidify_wrapper_expr (wrap, *expr_p);
          gcc_assert (t == *expr_p);

          if (want_value)
            {
              gimplify_and_add (wrap, pre_p);
              *expr_p = unshare_expr (*to_p);
            }
          else
            *expr_p = wrap;
          return GS_OK;
        }

      default:
        ret = GS_UNHANDLED;
        break;
      }

  return ret;
}

/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
   DECL_COMPLEX_GIMPLE_REG_P set.  */

static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, tree *pre_p,
                                   bool want_value)
{
  enum tree_code code, ocode;
  tree lhs, rhs, new_rhs, other, realpart, imagpart;

  lhs = TREE_OPERAND (*expr_p, 0);
  rhs = TREE_OPERAND (*expr_p, 1);
  code = TREE_CODE (lhs);
  lhs = TREE_OPERAND (lhs, 0);

  /* OCODE selects the part of the complex value NOT being stored.  */
  ocode = code == REALPART_EXPR ?
IMAGPART_EXPR : REALPART_EXPR;
  other = build1 (ocode, TREE_TYPE (rhs), lhs);
  other = get_formal_tmp_var (other, pre_p);

  realpart = code == REALPART_EXPR ? rhs : other;
  imagpart = code == REALPART_EXPR ? other : rhs;

  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);

  TREE_OPERAND (*expr_p, 0) = lhs;
  TREE_OPERAND (*expr_p, 1) = new_rhs;

  if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = rhs;
    }

  return GS_ALL_DONE;
}

/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.

      modify_expr
              : varname '=' rhs
              | '*' ID '=' rhs

    PRE_P points to the list where side effects that must happen before
        *EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
        *EXPR_P should be stored.

    WANT_VALUE is nonzero iff we want to use the value of this expression
        in another expression.  */

static enum gimplify_status
gimplify_modify_expr (tree *expr_p, tree *pre_p, tree *post_p,
                      bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
              || TREE_CODE (*expr_p) == INIT_EXPR);

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
                                  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  Do this after
     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
     types properly.  */
  if (zero_sized_type (TREE_TYPE (*from_p)))
    {
      gimplify_stmt (from_p);
      gimplify_stmt (to_p);
      append_to_statement_list (*from_p, pre_p);
      append_to_statement_list (*to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must here.  */
  maybe_with_size_expr (from_p);

  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  ret = gimplify_expr (from_p, pre_p, post_p,
                       rhs_predicate_for (*to_p), fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Now see if the above changed *from_p to something we handle
     specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
                                  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
        return gimplify_modify_expr_to_memset (expr_p, size, want_value);
      if (is_gimple_addressable (from))
        {
          *from_p = from;
          return gimplify_modify_expr_to_memcpy (expr_p, size, want_value);
        }
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* If we've somehow already got an SSA_NAME on the LHS, then
         we're probably modified it twice.  Not good.  */
      gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
      *to_p = make_ssa_name (*to_p, *expr_p);
    }

  if (want_value)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = *to_p;
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify a comparison between two variable-sized objects.  Do this
   with a call to BUILT_IN_MEMCMP.  */

static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);
  tree args, t, dest;

  /* Build the argument list (size, &op1, &op0) back to front.  */
  t = TYPE_SIZE_UNIT (TREE_TYPE (op0));
  t = unshare_expr (t);
  t = SUBSTITUTE_PLACEHOLDER_IN_EXPR (t, op0);
  args = tree_cons (NULL, t, NULL);
  t = build_fold_addr_expr (op1);
  args = tree_cons (NULL, t, args);
  dest = build_fold_addr_expr (op0);
  args = tree_cons (NULL, dest, args);
  t = implicit_built_in_decls[BUILT_IN_MEMCMP];
  t = build_function_call_expr (t, args);
  /* Compare the memcmp result against zero with the original comparison
     code, preserving the expression's semantics.  */
  *expr_p
    = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t,
              integer_zero_node);

  return GS_OK;
}

/* Gimplify a comparison between two aggregate objects of integral scalar
   mode as a comparison between the bitwise equivalent scalar values.  */

static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);

  tree type = TREE_TYPE (op0);
  tree scalar_type = lang_hooks.types.type_for_mode (TYPE_MODE (type), 1);

  /* Reinterpret both operands as the integer type of the same mode.  */
  op0 = fold_build1 (VIEW_CONVERT_EXPR, scalar_type, op0);
  op1 = fold_build1 (VIEW_CONVERT_EXPR, scalar_type, op1);

  *expr_p
    = fold_build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), op0, op1);

  return GS_OK;
}

/* Gimplify TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR expressions.
EXPR_P points to the expression to gimplify.

   Expressions of the form 'a && b' are gimplified to:

        a && b ? true : false

   gimplify_cond_expr will do the rest.

   PRE_P points to the list where side effects that must happen before
        *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_boolean_expr (tree *expr_p)
{
  /* Preserve the original type of the expression.  */
  tree type = TREE_TYPE (*expr_p);

  *expr_p = build3 (COND_EXPR, type, *expr_p,
                    fold_convert (type, boolean_true_node),
                    fold_convert (type, boolean_false_node));

  return GS_OK;
}

/* Gimplifies an expression sequence.  This function gimplifies each
   expression and re-writes the original expression with the last
   expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the
       expressions in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.  */
/* ??? Should rearrange to share the pre-queue with all the indirect
   invocations of gimplify_expr.  Would probably save on creations of
   statement_list nodes.  */

static enum gimplify_status
gimplify_compound_expr (tree *expr_p, tree *pre_p, bool want_value)
{
  tree t = *expr_p;

  do
    {
      tree *sub_p = &TREE_OPERAND (t, 0);

      if (TREE_CODE (*sub_p) == COMPOUND_EXPR)
        gimplify_compound_expr (sub_p, pre_p, false);
      else
        gimplify_stmt (sub_p);
      append_to_statement_list (*sub_p, pre_p);

      t = TREE_OPERAND (t, 1);
    }
  while (TREE_CODE (t) == COMPOUND_EXPR);

  *expr_p = t;
  if (want_value)
    return GS_OK;
  else
    {
      gimplify_stmt (expr_p);
      return GS_ALL_DONE;
    }
}

/* Gimplifies a statement list.  These may be created either by an
   enlightened front-end, or by shortcut_cond_expr.  */

static enum gimplify_status
gimplify_statement_list (tree *expr_p, tree *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  tree_stmt_iterator i = tsi_start (*expr_p);

  while (!tsi_end_p (i))
    {
      tree t;

      gimplify_stmt (tsi_stmt_ptr (i));

      t = tsi_stmt (i);
      if (t == NULL)
        tsi_delink (&i);
      else if (TREE_CODE (t) == STATEMENT_LIST)
        {
          /* Splice a nested STATEMENT_LIST inline so the result is a
             single flat list.  */
          tsi_link_before (&i, t, TSI_SAME_STMT);
          tsi_delink (&i);
        }
      else
        tsi_next (&i);
    }

  if (temp)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = temp;
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
   gimplify.  After gimplification, EXPR_P will point to a new temporary
   that holds the original value of the SAVE_EXPR node.

   PRE_P points to the list where side effects that must happen before
       *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_save_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  enum gimplify_status ret = GS_ALL_DONE;
  tree val;

  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
  val = TREE_OPERAND (*expr_p, 0);

  /* If the SAVE_EXPR has not been resolved, then evaluate it once.  */
  if (!SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      /* The operand may be a void-valued expression such as SAVE_EXPRs
         generated by the Java frontend for class initialization.  It is
         being executed only for its side-effects.  */
      if (TREE_TYPE (val) == void_type_node)
        {
          ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
                               is_gimple_stmt, fb_none);
          append_to_statement_list (TREE_OPERAND (*expr_p, 0), pre_p);
          val = NULL;
        }
      else
        val = get_initialized_tmp_var (val, pre_p, post_p);

      TREE_OPERAND (*expr_p, 0) = val;
      SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
    }

  *expr_p = val;

  return ret;
}

/* Re-write the ADDR_EXPR node pointed to by EXPR_P

      unary_expr
              : ...
              | '&' varname
              ...

    PRE_P points to the list where side effects that must happen before
        *EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
        *EXPR_P should be stored.
*/

static enum gimplify_status
gimplify_addr_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    case MISALIGNED_INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
	 While the front end folds away '&*ptr' into 'ptr', these
	 expressions may be generated internally by the compiler (e.g.,
	 builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
	 ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
	 cv-qualification conversions, so make sure we add them if
	 needed.  */
      {
	tree op00 = TREE_OPERAND (op0, 0);
	tree t_expr = TREE_TYPE (expr);
	tree t_op00 = TREE_TYPE (op00);

        if (!lang_hooks.types_compatible_p (t_expr, t_op00))
	  {
#ifdef ENABLE_CHECKING
	    tree t_op0 = TREE_TYPE (op0);
	    gcc_assert (POINTER_TYPE_P (t_expr)
			&& cpt_same_type (TREE_CODE (t_op0) == ARRAY_TYPE
					  ? TREE_TYPE (t_op0) : t_op0,
					  TREE_TYPE (t_expr))
			&& POINTER_TYPE_P (t_op00)
			&& cpt_same_type (t_op0, TREE_TYPE (t_op00)));
#endif
	    op00 = fold_convert (TREE_TYPE (expr), op00);
	  }
	*expr_p = op00;
	ret = GS_OK;
      }
      break;

    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
	 this ADDR_EXPR.

	 ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
	 all clear.  The impact of this transformation is even less clear.  */

      /* If the operand is a useless conversion, look through it.  Doing so
	 guarantees that the ADDR_EXPR and its operand will remain of the
	 same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
	op0 = TREE_OPERAND (op0, 0);

      *expr_p = fold_convert (TREE_TYPE (expr),
			      build_fold_addr_expr (TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;

    default:
      /* We use fb_either here because the C frontend sometimes takes
	 the address of a call that returns a struct; see
	 gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
	 the implied temporary explicit.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
			   is_gimple_addressable, fb_either);
      if (ret != GS_ERROR)
	{
	  op0 = TREE_OPERAND (expr, 0);

	  /* For various reasons, the gimplification of the expression
	     may have made a new INDIRECT_REF.  */
	  if (TREE_CODE (op0) == INDIRECT_REF)
	    goto do_indirect_ref;

	  /* Make sure TREE_INVARIANT, TREE_CONSTANT, and
	     TREE_SIDE_EFFECTS is set properly.  */
	  recompute_tree_invariant_for_addr_expr (expr);

	  /* Mark the RHS addressable.  */
	  lang_hooks.mark_addressable (TREE_OPERAND (expr, 0));
	}
      break;
    }

  return ret;
}

/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.  */

static enum gimplify_status
gimplify_asm_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  int noutputs = list_length (ASM_OUTPUTS (expr));
  const char **oconstraints
    = (const char **) alloca ((noutputs) * sizeof (const char *));
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;

  ret = GS_ALL_DONE;
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      size_t constraint_len;
      oconstraints[i] = constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
	continue;

      parse_output_constraint (&constraint, i, 0, 0,
			       &allows_mem, &allows_reg, &is_inout);

      /* A memory-only output must live in addressable storage.  */
      if (!allows_reg && allows_mem)
	lang_hooks.mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
			    is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			    fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
	{
	  error ("invalid lvalue in asm output %d", i);
	  ret = tret;
	}

      if (is_inout)
	{
	  /* An input/output operand.  To give the optimizers more
	     flexibility, split it into separate input and output
	     operands.  */
	  tree input;
	  char buf[10];
	  /* Turn the in/out constraint into an output constraint.  */
	  char *p = xstrdup (constraint);
	  p[0] = '=';
	  TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

	  /* And add a matching input constraint.  */
	  if (allows_reg)
	    {
	      sprintf (buf, "%d", i);

	      /* If there are multiple alternatives in the constraint,
		 handle each of them individually.  Those that allow register
		 will be replaced with operand number, the others will stay
		 unchanged.  */
	      if (strchr (p, ',') != NULL)
		{
		  size_t len = 0, buflen = strlen (buf);
		  char *beg, *end, *str, *dst;

		  /* First pass: compute the length of the rewritten
		     constraint string.  */
		  for (beg = p + 1;;)
		    {
		      end = strchr (beg, ',');
		      if (end == NULL)
			end = strchr (beg, '\0');
		      if ((size_t) (end - beg) < buflen)
			len += buflen + 1;
		      else
			len += end - beg + 1;
		      if (*end)
			beg = end + 1;
		      else
			break;
		    }

		  /* Second pass: substitute the operand number for
		     register-allowing alternatives.  */
		  str = (char *) alloca (len);
		  for (beg = p + 1, dst = str;;)
		    {
		      const char *tem;
		      bool mem_p, reg_p, inout_p;

		      end = strchr (beg, ',');
		      if (end)
			*end = '\0';
		      beg[-1] = '=';
		      tem = beg - 1;
		      parse_output_constraint (&tem, i, 0, 0,
					       &mem_p, &reg_p, &inout_p);
		      if (dst != str)
			*dst++ = ',';
		      if (reg_p)
			{
			  memcpy (dst, buf, buflen);
			  dst += buflen;
			}
		      else
			{
			  if (end)
			    len = end - beg;
			  else
			    len = strlen (beg);
			  memcpy (dst, beg, len);
			  dst += len;
			}
		      if (end)
			beg = end + 1;
		      else
			break;
		    }
		  *dst = '\0';
		  input = build_string (dst - str, str);
		}
	      else
		input = build_string (strlen (buf), buf);
	    }
	  else
	    /* Memory-only in/out: the matching input keeps the original
	       constraint minus its leading '+'.  */
	    input = build_string (constraint_len - 1, constraint + 1);
	  free (p);
	  input = build_tree_list (build_tree_list (NULL_TREE, input),
				   unshare_expr (TREE_VALUE (link)));
	  ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
	}
    }

  /* Note that I keeps counting past the outputs so diagnostics number
     operands the way the user wrote them.  */
  for (link = ASM_INPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
			      oconstraints, &allows_mem, &allows_reg);

      /* If we can't make copies, we can only accept memory.  */
      if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link))))
	{
	  if (allows_mem)
	    allows_reg = 0;
	  else
	    {
	      error ("impossible constraint in %<asm%>");
	      error ("non-memory input %d must stay in memory", i);
	      return GS_ERROR;
	    }
	}

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_lvalue, fb_lvalue | fb_mayfail);
	  lang_hooks.mark_addressable (TREE_VALUE (link));
	  if (tret == GS_ERROR)
	    {
	      error ("memory input %d is not directly addressable", i);
	      ret = tret;
	    }
	}
      else
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_asm_val, fb_rvalue);
	  if (tret == GS_ERROR)
	    ret = tret;
	}
    }

  return ret;
}

/* Gimplify a CLEANUP_POINT_EXPR.  Currently this works by adding
   WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
   gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
   return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.  But any such nesting would be broken, anyway;
   we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
   and continues out of it.  We can do that at the RTL level, though, so
   having an optimizer to tighten up try/finally regions would be a Good
   Thing.  */

static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p)
{
  tree_stmt_iterator iter;
  tree body;

  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.  */
  int old_conds = gimplify_ctxp->conditions;
  tree old_cleanups = gimplify_ctxp->conditional_cleanups;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL_TREE;

  body = TREE_OPERAND (*expr_p, 0);
  gimplify_to_stmt_list (&body);

  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;

  for (iter = tsi_start (body); !tsi_end_p (iter); )
    {
      tree *wce_p = tsi_stmt_ptr (iter);
      tree wce = *wce_p;

      if (TREE_CODE (wce) == WITH_CLEANUP_EXPR)
	{
	  if (tsi_one_before_end_p (iter))
	    {
	      /* The cleanup is the last statement: no body remains to
		 protect, so just emit the cleanup itself.  */
	      tsi_link_before (&iter, TREE_OPERAND (wce, 0), TSI_SAME_STMT);
	      tsi_delink (&iter);
	      break;
	    }
	  else
	    {
	      /* Wrap the remainder of the list in a TRY_FINALLY_EXPR
		 (or TRY_CATCH_EXPR for EH-only cleanups) guarded by this
		 cleanup.  */
	      tree sl, tfe;
	      enum tree_code code;

	      if (CLEANUP_EH_ONLY (wce))
		code = TRY_CATCH_EXPR;
	      else
		code = TRY_FINALLY_EXPR;

	      sl = tsi_split_statement_list_after (&iter);
	      tfe = build2 (code, void_type_node, sl, NULL_TREE);
	      append_to_statement_list (TREE_OPERAND (wce, 0),
					&TREE_OPERAND (tfe, 1));
	      *wce_p = tfe;
	      iter = tsi_start (sl);
	    }
	}
      else
	tsi_next (&iter);
    }

  if (temp)
    {
      *expr_p = temp;
      append_to_statement_list (body, pre_p);
      return GS_OK;
    }
  else
    {
      *expr_p = body;
      return GS_ALL_DONE;
    }
}

/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  */

static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, tree *pre_p)
{
  tree wce;

  /* Errors can result in improperly nested cleanups.  Which results in
     confusion when trying to resolve the WITH_CLEANUP_EXPR.  */
  if (errorcount || sorrycount)
    return;

  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex.  We only
	 want to run the cleanup if we actually ran the initialization that
	 necessitates it, but we want to run it after the end of the
	 conditional context.  So we wrap the try/finally around the
	 condition and use a flag to determine whether or not to actually
	 run the destructor.  Thus

	   test ? f(A()) : 0

	 becomes (approximately)

	   flag = 0;
	   try {
	     if (test) { A::A(temp); flag = 1; val = f(temp); }
	     else { val = 0; }
	   } finally {
	     if (flag) A::~A(temp);
	   }
	   val
      */

      tree flag = create_tmp_var (boolean_type_node, "cleanup");
      tree ffalse = build2 (MODIFY_EXPR, void_type_node, flag,
			    boolean_false_node);
      tree ftrue = build2 (MODIFY_EXPR, void_type_node, flag,
			   boolean_true_node);
      cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      append_to_statement_list (ffalse, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (wce, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (ftrue, pre_p);

      /* Because of this manipulation, and the EH edges that jump
	 threading cannot redirect, the temporary (VAR) will appear
	 to be used uninitialized.  Don't warn.  */
      TREE_NO_WARNING (var) = 1;
    }
  else
    {
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      CLEANUP_EH_ONLY (wce) = eh_only;
      append_to_statement_list (wce, pre_p);
    }

  gimplify_stmt (&TREE_OPERAND (wce, 0));
}

/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR.  */

static enum gimplify_status
gimplify_target_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;

  if (init)
    {
      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
	 to the temps list.  */
      gimple_add_tmp_var (temp);

      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
	 expression is supposed to initialize the slot.  */
      if (VOID_TYPE_P (TREE_TYPE (init)))
	ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
	{
	  init = build2 (INIT_EXPR, void_type_node, temp, init);
	  ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt,
			       fb_none);
	}
      if (ret == GS_ERROR)
	{
	  /* PR c++/28266 Make sure this is expanded only once.  */
	  TARGET_EXPR_INITIAL (targ) = NULL_TREE;
	  return GS_ERROR;
	}
      append_to_statement_list (init, pre_p);

      /* If needed, push the cleanup for the temp.  */
      if (TARGET_EXPR_CLEANUP (targ))
	{
	  gimplify_stmt (&TARGET_EXPR_CLEANUP (targ));
	  gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
			       CLEANUP_EH_ONLY (targ), pre_p);
	}

      /* Only expand this once.  */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before.  */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));

  *expr_p = temp;
  return GS_OK;
}

/* Gimplification of expression trees.  */

/* Gimplify an expression which appears at statement context; usually, this
   means replacing it with a suitably gimple STATEMENT_LIST.  */

void
gimplify_stmt (tree *stmt_p)
{
  gimplify_expr (stmt_p, NULL, NULL, is_gimple_stmt, fb_none);
}

/* Similarly, but force the result to be a STATEMENT_LIST.  */

void
gimplify_to_stmt_list (tree *stmt_p)
{
  gimplify_stmt (stmt_p);
  if (!*stmt_p)
    *stmt_p = alloc_stmt_list ();
  else if (TREE_CODE (*stmt_p) != STATEMENT_LIST)
    {
      tree t = *stmt_p;
      *stmt_p = alloc_stmt_list ();
      append_to_statement_list (t, stmt_p);
    }
}

/* Add FIRSTPRIVATE entries for DECL in the OpenMP the surrounding parallels
   to CTX.  If entries already exist, force them to be some flavor of private.
   If there is no enclosing parallel, do nothing.  */

void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  if (decl == NULL || !DECL_P (decl))
    return;

  do
    {
      n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
      if (n != NULL)
	{
	  /* Downgrade SHARED to FIRSTPRIVATE; any existing private
	     flavor already satisfies the requirement, so stop.  */
	  if (n->value & GOVD_SHARED)
	    n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN);
	  else
	    return;
	}
      else if (ctx->is_parallel)
	omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);

      ctx = ctx->outer_context;
    }
  while (ctx);
}

/* Similarly for each of the type sizes of TYPE.
*/

static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
  if (type == NULL || type == error_mark_node)
    return;
  type = TYPE_MAIN_VARIANT (type);

  /* Visit each type only once per context.  */
  if (pointer_set_insert (ctx->privatized_types, type))
    return;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
      omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
      omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
      break;

    case ARRAY_TYPE:
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree field;
	for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	  if (TREE_CODE (field) == FIELD_DECL)
	    {
	      omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
	      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
	    }
      }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      break;

    default:
      break;
    }

  omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
  omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));
  lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}

/* Add an entry for DECL in the OpenMP context CTX with FLAGS.  */

static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return;

  /* Never elide decls whose type has TREE_ADDRESSABLE set.  This means
     there are constructors involved somewhere.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (decl))
      || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
    flags |= GOVD_SEEN;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class.  */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE.  */
      nflags = n->value | flags;
      gcc_assert ((nflags & GOVD_DATA_SHARE_CLASS)
		  == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE));
      n->value = nflags;
      return;
    }

  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type.  */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context.  */
      if (!(flags & GOVD_LOCAL))
	{
	  nflags = flags & GOVD_PRIVATE ? GOVD_PRIVATE : GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}

      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE.  */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE.  The sharing would take place via the pointer variable
	 which we remapped above.  */
      if (flags & GOVD_SHARED)
	flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));

      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available.  This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified.  */
      else if (! (flags & GOVD_LOCAL))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if (lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      gcc_assert ((flags & GOVD_LOCAL) == 0);
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized.  */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (TREE_CODE (t) != INTEGER_CST)
	    omp_notice_variable (ctx, t, true);
	}
    }

  splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
}

/* Record the fact that DECL was used within the OpenMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors.  Return true if DECL is going to
   be remapped and thus DECL shouldn't be gimplified into its
   DECL_VALUE_EXPR (if any).  */

static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return false;

  /* Threadprivate variables are predetermined.  */
  if (is_global_var (decl))
    {
      if (DECL_THREAD_LOCAL_P (decl))
	return false;

      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  tree value = get_base_address (DECL_VALUE_EXPR (decl));

	  if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
	    return false;
	}
    }

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      /* First reference in this context: apply the default clause.  */
      enum omp_clause_default_kind default_kind, kind;

      if (!ctx->is_parallel)
	goto do_outer;

      /* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
	 remapped firstprivate instead of shared.  To some extent this is
	 addressed in omp_firstprivatize_type_sizes, but not effectively.  */
      default_kind = ctx->default_kind;
      kind = lang_hooks.decls.omp_predetermined_sharing (decl);
      if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
	default_kind = kind;

      switch (default_kind)
	{
	case OMP_CLAUSE_DEFAULT_NONE:
	  error ("%qs not specified in enclosing parallel",
		 IDENTIFIER_POINTER (DECL_NAME (decl)));
	  error ("%Henclosing parallel", &ctx->location);
	  /* FALLTHRU */
	case OMP_CLAUSE_DEFAULT_SHARED:
	  flags |= GOVD_SHARED;
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  flags |= GOVD_PRIVATE;
	  break;
	default:
	  gcc_unreachable ();
	}

      omp_add_variable (ctx, decl, flags);

      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }

  shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);

  /* If nothing changed, there's nothing left to do.  */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;

 do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context.  */
  if (flags & GOVD_PRIVATE)
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}

/* Verify that DECL is private within CTX.  If there's specific information
   to the contrary in the innermost scope, generate an error.
*/

static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      if (n->value & GOVD_SHARED)
	{
	  if (ctx == gimplify_omp_ctxp)
	    {
	      error ("iteration variable %qs should be private",
		     IDENTIFIER_POINTER (DECL_NAME (decl)));
	      /* Force the variable private so we only complain once.  */
	      n->value = GOVD_PRIVATE;
	      return true;
	    }
	  else
	    return false;
	}
      else if ((n->value & GOVD_EXPLICIT) != 0
	       && (ctx == gimplify_omp_ctxp
		   || (ctx->is_combined_parallel
		       && gimplify_omp_ctxp->outer_context == ctx)))
	{
	  if ((n->value & GOVD_FIRSTPRIVATE) != 0)
	    error ("iteration variable %qs should not be firstprivate",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	  else if ((n->value & GOVD_REDUCTION) != 0)
	    error ("iteration variable %qs should not be reduction",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	}
      return true;
    }

  if (ctx->is_parallel)
    return false;
  else if (ctx->outer_context)
    return omp_is_private (ctx->outer_context, decl);
  else
    return !is_global_var (decl);
}

/* Return true if DECL is private within a parallel region
   that binds to the current construct's context or in parallel
   region's REDUCTION clause.  */

static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  do
    {
      ctx = ctx->outer_context;
      if (ctx == NULL)
	return !(is_global_var (decl)
		 /* References might be private, but might be shared too.  */
		 || lang_hooks.decls.omp_privatize_by_reference (decl));

      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      if (n != NULL)
	return (n->value & GOVD_SHARED) == 0;
    }
  while (!ctx->is_parallel);
  return false;
}

/* Scan the OpenMP clauses in *LIST_P, installing mappings into a new
   and previous omp contexts.
*/

static void
gimplify_scan_omp_clauses (tree *list_p, tree *pre_p, bool in_parallel,
			   bool in_combined_parallel)
{
  struct gimplify_omp_ctx *ctx, *outer_ctx;
  tree c;

  ctx = new_omp_context (in_parallel, in_combined_parallel);
  outer_ctx = ctx->outer_context;

  while ((c = *list_p) != NULL)
    {
      enum gimplify_status gs;
      bool remove = false;
      bool notice_outer = true;
      const char *check_non_private = NULL;
      unsigned int flags;
      tree decl;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  flags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  notice_outer = false;
	  goto do_add;
	case OMP_CLAUSE_SHARED:
	  flags = GOVD_SHARED | GOVD_EXPLICIT;
	  goto do_add;
	case OMP_CLAUSE_FIRSTPRIVATE:
	  flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
	  check_non_private = "firstprivate";
	  goto do_add;
	case OMP_CLAUSE_LASTPRIVATE:
	  flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "lastprivate";
	  goto do_add;
	case OMP_CLAUSE_REDUCTION:
	  flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "reduction";
	  goto do_add;

	do_add:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	  omp_add_variable (ctx, decl, flags);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      /* Gimplify the user-supplied reduction init/merge sequences
	         inside this context so the placeholder is visible.  */
	      omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
				GOVD_LOCAL | GOVD_SEEN);
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_INIT (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_INIT (c));
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_MERGE (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_MERGE (c));
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  if (notice_outer)
	    goto do_notice;
	  break;

	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	do_notice:
	  if (outer_ctx)
	    omp_notice_variable (outer_ctx, decl, true);
	  if (check_non_private
	      && !in_parallel
	      && omp_check_private (ctx, decl))
	    {
	      error ("%s variable %qs is private in outer context",
		     check_non_private,
		     IDENTIFIER_POINTER (DECL_NAME (decl)));
	      remove = true;
	    }
	  break;

	case OMP_CLAUSE_IF:
	  OMP_CLAUSE_OPERAND (c, 0)
	    = gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
	  /* Fall through.  */

	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NUM_THREADS:
	  gs = gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
			      is_gimple_val, fb_rvalue);
	  if (gs == GS_ERROR)
	    remove = true;
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  gimplify_omp_ctxp = ctx;
}

/* For all variables that were not actually used within the context,
   remove PRIVATE, SHARED, and FIRSTPRIVATE clauses.  */

static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  tree *list_p = (tree *) data;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;

  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_PRIVATE);
      private_debug = true;
    }
  else
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
						   !!(flags & GOVD_SHARED));
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_SHARED)
    {
      if (is_global_var (decl))
	{
	  /* A global is only worth a SHARED clause if some enclosing
	     context privatizes it; otherwise sharing is implicit.  */
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
	  while (ctx != NULL)
	    {
	      splay_tree_node on
		= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
				      | GOVD_PRIVATE | GOVD_REDUCTION)) != 0)
		break;
	      ctx = ctx->outer_context;
	    }
	  if (ctx == NULL)
	    return 0;
	}
      code = OMP_CLAUSE_SHARED;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    code = OMP_CLAUSE_FIRSTPRIVATE;
  else
    gcc_unreachable ();

  clause = build_omp_clause (code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = *list_p;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  *list_p = clause;

  return 0;
}

/* Rewrite the clause list *LIST_P to reflect actual usage: drop unused
   explicit clauses, adjust lastprivate/private-debug bits, and append
   implicit data-sharing clauses; then pop the current OMP context.  */

static void
gimplify_adjust_omp_clauses (tree *list_p)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree c, decl;

  while ((c = *list_p) != NULL)
    {
      splay_tree_node n;
      bool remove = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  remove = !(n->value & GOVD_SEEN);
	  if (! remove)
	    {
	      bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
	      if ((n->value & GOVD_DEBUG_PRIVATE)
		  || lang_hooks.decls.omp_private_debug_clause (decl, shared))
		{
		  gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
			      || ((n->value & GOVD_DATA_SHARE_CLASS)
				  == GOVD_PRIVATE));
		  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
		  OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
		}
	    }
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
	     accurately reflect the presence of a FIRSTPRIVATE clause.  */
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	    = (n->value & GOVD_FIRSTPRIVATE) != 0;
	  break;

	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_DEFAULT:
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  /* Add in any implicit data sharing.  */
  splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, list_p);

  gimplify_omp_ctxp = ctx->outer_context;
  delete_omp_context (ctx);
}

/* Gimplify the contents of an OMP_PARALLEL statement.
   This involves gimplification of the body, as well as scanning the body
   for used variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */

static enum gimplify_status
gimplify_omp_parallel (tree *expr_p, tree *pre_p)
{
  tree expr = *expr_p;

  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p, true,
			     OMP_PARALLEL_COMBINED (expr));

  push_gimplify_context ();

  gimplify_stmt (&OMP_PARALLEL_BODY (expr));

  if (TREE_CODE (OMP_PARALLEL_BODY (expr)) == BIND_EXPR)
    pop_gimplify_context (OMP_PARALLEL_BODY (expr));
  else
    pop_gimplify_context (NULL_TREE);

  gimplify_adjust_omp_clauses (&OMP_PARALLEL_CLAUSES (expr));

  return GS_ALL_DONE;
}

/* Gimplify the gross structure of an OMP_FOR statement.  */

static enum gimplify_status
gimplify_omp_for (tree *expr_p, tree *pre_p)
{
  tree for_stmt, decl, t;
  enum gimplify_status ret = 0;

  for_stmt = *expr_p;

  gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p, false, false);

  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
  decl = TREE_OPERAND (t, 0);
  gcc_assert (DECL_P (decl));
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl)));

  /* Make sure the iteration variable is private.  */
  if (omp_is_private (gimplify_omp_ctxp, decl))
    omp_notice_variable (gimplify_omp_ctxp, decl, true);
  else
    omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);

  ret |= gimplify_expr (&TREE_OPERAND (t, 1), &OMP_FOR_PRE_BODY (for_stmt),
			NULL, is_gimple_val, fb_rvalue);

  t = OMP_FOR_COND (for_stmt);
  gcc_assert (COMPARISON_CLASS_P (t));
  gcc_assert (TREE_OPERAND (t, 0) == decl);

  ret |= gimplify_expr (&TREE_OPERAND (t, 1), &OMP_FOR_PRE_BODY (for_stmt),
			NULL, is_gimple_val, fb_rvalue);

  /* Canonicalize the increment into DECL = DECL + STEP form.  */
  t = OMP_FOR_INCR (for_stmt);
  switch (TREE_CODE (t))
    {
    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      t = build_int_cst (TREE_TYPE (decl), 1);
      goto build_modify;
    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      t = build_int_cst (TREE_TYPE (decl), -1);
      goto build_modify;
    build_modify:
      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
      t = build2 (MODIFY_EXPR, void_type_node, decl, t);
      OMP_FOR_INCR (for_stmt) = t;
      break;

    case MODIFY_EXPR:
      gcc_assert (TREE_OPERAND (t, 0) == decl);
      t = TREE_OPERAND (t, 1);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  /* Normalize DECL = STEP + DECL into DECL = DECL + STEP.  */
	  if (TREE_OPERAND (t, 1) == decl)
	    {
	      TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
	      TREE_OPERAND (t, 0) = decl;
	      break;
	    }
	  /* FALLTHRU */
	case MINUS_EXPR:
	  gcc_assert (TREE_OPERAND (t, 0) == decl);
	  break;
	default:
	  gcc_unreachable ();
	}

      ret |= gimplify_expr (&TREE_OPERAND (t, 1), &OMP_FOR_PRE_BODY (for_stmt),
			    NULL, is_gimple_val, fb_rvalue);
      break;

    default:
      gcc_unreachable ();
    }

  gimplify_to_stmt_list (&OMP_FOR_BODY (for_stmt));
  gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));

  return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
}

/* Gimplify the gross structure of other OpenMP worksharing constructs.
   In particular, OMP_SECTIONS and OMP_SINGLE.
*/

static enum gimplify_status
gimplify_omp_workshare (tree *expr_p, tree *pre_p)
{
  tree stmt = *expr_p;

  gimplify_scan_omp_clauses (&OMP_CLAUSES (stmt), pre_p, false, false);
  gimplify_to_stmt_list (&OMP_BODY (stmt));
  gimplify_adjust_omp_clauses (&OMP_CLAUSES (stmt));

  return GS_ALL_DONE;
}

/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */

static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  while ((TREE_CODE (expr) == NOP_EXPR
          || TREE_CODE (expr) == CONVERT_EXPR
          || TREE_CODE (expr) == NON_LVALUE_EXPR)
         && TREE_OPERAND (expr, 0) != error_mark_node
         && (TYPE_MAIN_VARIANT (TREE_TYPE (expr))
             == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (expr, 0)))))
    expr = TREE_OPERAND (expr, 0);

  if (TREE_CODE (expr) == INDIRECT_REF && TREE_OPERAND (expr, 0) == addr)
    return true;
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}

/* A subroutine of gimplify_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns GS_UNHANDLED if the expression is not of the proper form.  */

static enum gimplify_status
gimplify_omp_atomic_fetch_op (tree *expr_p, tree addr, tree rhs, int index)
{
  enum built_in_function base;
  tree decl, args, itype;
  enum insn_code *optab;

  /* Check for one of the supported fetch-op operations.  */
  switch (TREE_CODE (rhs))
    {
    case PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return GS_UNHANDLED;
    }

  /* Make sure the expression is of the proper form.  */
  if (goa_lhs_expr_p (TREE_OPERAND (rhs, 0), addr))
    rhs = TREE_OPERAND (rhs, 1);
  else if (commutative_tree_code (TREE_CODE (rhs))
	   && goa_lhs_expr_p (TREE_OPERAND (rhs, 1), addr))
    rhs = TREE_OPERAND (rhs, 0);
  else
    return GS_UNHANDLED;

  decl = built_in_decls[base + index + 1];
  itype = TREE_TYPE (TREE_TYPE (decl));

  if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return GS_UNHANDLED;

  args = tree_cons (NULL, fold_convert (itype, rhs), NULL);
  args = tree_cons (NULL, addr, args);
  *expr_p = build_function_call_expr (decl, args);
  return GS_OK;
}

/* A subroutine of gimplify_omp_atomic_pipeline.  Walk *EXPR_P and replace
   appearances of *LHS_ADDR with LHS_VAR.  If an expression does not involve
   the lhs, evaluate it into a temporary.  Return 1 if the lhs appeared as
   a subexpression, 0 if it did not, or -1 if an error was encountered.
*/

static int
goa_stabilize_expr (tree *expr_p, tree *pre_p, tree lhs_addr, tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;

  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  if (is_gimple_val (expr))
    return 0;

  saw_lhs = 0;
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
				     lhs_addr, lhs_var);
      /* FALLTHRU -- binary nodes also have operand 0.  */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
				     lhs_addr, lhs_var);
      break;
    default:
      break;
    }

  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }

  return saw_lhs;
}

/* A subroutine of gimplify_omp_atomic.  Implement the atomic operation as:

	oldval = *addr;
      repeat:
	newval = rhs;	// with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static enum gimplify_status
gimplify_omp_atomic_pipeline (tree *expr_p, tree *pre_p, tree addr,
			      tree rhs, int index)
{
  tree oldval, oldival, oldival2, newval, newival, label;
  tree type, itype, cmpxchg, args, x, iaddr;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return GS_UNHANDLED;

  oldval = create_tmp_var (type, NULL);
  newval = create_tmp_var (type, NULL);

  /* Precompute as much of RHS as possible.  In the same walk, replace
     occurrences of the lhs value with our temporary.  */
  if (goa_stabilize_expr (&rhs, pre_p, addr, oldval) < 0)
    return GS_ERROR;

  x = build_fold_indirect_ref (addr);
  x = build2 (MODIFY_EXPR, void_type_node, oldval, x);
  gimplify_and_add (x, pre_p);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
    {
      oldival = oldval;
      newival = newval;
      iaddr = addr;
    }
  else
    {
      oldival = create_tmp_var (itype, NULL);
      newival = create_tmp_var (itype, NULL);

      x = build1 (VIEW_CONVERT_EXPR, itype, oldval);
      x = build2 (MODIFY_EXPR, void_type_node, oldival, x);
      gimplify_and_add (x, pre_p);
      iaddr = fold_convert (build_pointer_type (itype), addr);
    }

  oldival2 = create_tmp_var (itype, NULL);

  /* The CAS retry loop starts here.  */
  label = create_artificial_label ();
  x = build1 (LABEL_EXPR, void_type_node, label);
  gimplify_and_add (x, pre_p);

  x = build2 (MODIFY_EXPR, void_type_node, newval, rhs);
  gimplify_and_add (x, pre_p);

  if (newval != newival)
    {
      x = build1 (VIEW_CONVERT_EXPR, itype, newval);
      x = build2 (MODIFY_EXPR, void_type_node, newival, x);
      gimplify_and_add (x, pre_p);
    }

  /* Remember the value we fed to the CAS so we can detect failure.  */
  x = build2 (MODIFY_EXPR, void_type_node, oldival2,
	      fold_convert (itype, oldival));
  gimplify_and_add (x, pre_p);

  args = tree_cons (NULL, fold_convert (itype, newival), NULL);
  args = tree_cons (NULL, fold_convert (itype, oldival), args);
  args = tree_cons (NULL, iaddr, args);
  x = build_function_call_expr (cmpxchg, args);
  if (oldval == oldival)
    x = fold_convert (type, x);
  x = build2 (MODIFY_EXPR, void_type_node, oldival, x);
  gimplify_and_add (x, pre_p);

  /* For floating point, be prepared for the loop backedge.  */
  if (oldval != oldival)
    {
      x = build1 (VIEW_CONVERT_EXPR, type, oldival);
      x = build2 (MODIFY_EXPR, void_type_node, oldval, x);
      gimplify_and_add (x, pre_p);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  x = build3 (COND_EXPR, void_type_node,
	      build2 (NE_EXPR, boolean_type_node, oldival, oldival2),
	      build1 (GOTO_EXPR, void_type_node, label), NULL);
  gimplify_and_add (x, pre_p);

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* A subroutine of gimplify_omp_atomic.  Implement the atomic operation as:

	GOMP_atomic_start ();
	*addr = rhs;
	GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.  */

static enum gimplify_status
gimplify_omp_atomic_mutex (tree *expr_p, tree *pre_p, tree addr, tree rhs)
{
  tree t;

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, pre_p);

  t = build_fold_indirect_ref (addr);
  t = build2 (MODIFY_EXPR, void_type_node, t, rhs);
  gimplify_and_add (t, pre_p);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_function_call_expr (t, NULL);
  gimplify_and_add (t, pre_p);

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* Gimplify an OMP_ATOMIC statement.  */

static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, tree *pre_p)
{
  tree addr = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.
*/ index = tree_low_cst (TYPE_SIZE_UNIT (type), 1); index = exact_log2 (index); if (index >= 0 && index <= 4) { enum gimplify_status gs; unsigned int align; if (DECL_P (TREE_OPERAND (addr, 0))) align = DECL_ALIGN_UNIT (TREE_OPERAND (addr, 0)); else if (TREE_CODE (TREE_OPERAND (addr, 0)) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (TREE_OPERAND (addr, 0), 1)) == FIELD_DECL) align = DECL_ALIGN_UNIT (TREE_OPERAND (TREE_OPERAND (addr, 0), 1)); else align = TYPE_ALIGN_UNIT (type); /* __sync builtins require strict data alignment. */ if (exact_log2 (align) >= index) { /* When possible, use specialized atomic update functions. */ if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)) { gs = gimplify_omp_atomic_fetch_op (expr_p, addr, rhs, index); if (gs != GS_UNHANDLED) return gs; } /* If we don't have specialized __sync builtins, try and implement as a compare and swap loop. */ gs = gimplify_omp_atomic_pipeline (expr_p, pre_p, addr, rhs, index); if (gs != GS_UNHANDLED) return gs; } } /* The ultimate fallback is wrapping the operation in a mutex. */ return gimplify_omp_atomic_mutex (expr_p, pre_p, addr, rhs); } /* Gimplifies the expression tree pointed to by EXPR_P. Return 0 if gimplification failed. PRE_P points to the list where side effects that must happen before EXPR should be stored. POST_P points to the list where side effects that must happen after EXPR should be stored, or NULL if there is no suitable list. In that case, we copy the result to a temporary, emit the post-effects, and then return the temporary. GIMPLE_TEST_F points to a function that takes a tree T and returns nonzero if T is in the GIMPLE form requested by the caller. The GIMPLE predicates are in tree-gimple.c. This test is used twice. Before gimplification, the test is invoked to determine whether *EXPR_P is already gimple enough. If that fails, *EXPR_P is gimplified according to its code and GIMPLE_TEST_F is called again. 
If the test still fails, then a new temporary variable is created and assigned the value of the gimplified expression. FALLBACK tells the function what sort of a temporary we want. If the 1 bit is set, an rvalue is OK. If the 2 bit is set, an lvalue is OK. If both are set, either is OK, but an lvalue is preferable. The return value is either GS_ERROR or GS_ALL_DONE, since this function iterates until solution. */ enum gimplify_status gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool (* gimple_test_f) (tree), fallback_t fallback) { tree tmp; tree internal_pre = NULL_TREE; tree internal_post = NULL_TREE; tree save_expr; int is_statement = (pre_p == NULL); location_t saved_location; enum gimplify_status ret; save_expr = *expr_p; if (save_expr == NULL_TREE) return GS_ALL_DONE; /* We used to check the predicate here and return immediately if it succeeds. This is wrong; the design is for gimplification to be idempotent, and for the predicates to only test for valid forms, not whether they are fully simplified. */ /* Set up our internal queues if needed. */ if (pre_p == NULL) pre_p = &internal_pre; if (post_p == NULL) post_p = &internal_post; saved_location = input_location; if (save_expr != error_mark_node && EXPR_HAS_LOCATION (*expr_p)) input_location = EXPR_LOCATION (*expr_p); /* Loop over the specific gimplifiers until the toplevel node remains the same. */ do { /* Strip away as many useless type conversions as possible at the toplevel. */ STRIP_USELESS_TYPE_CONVERSION (*expr_p); /* Remember the expr. */ save_expr = *expr_p; /* Die, die, die, my darling. */ if (save_expr == error_mark_node || (TREE_TYPE (save_expr) && TREE_TYPE (save_expr) == error_mark_node)) { ret = GS_ERROR; break; } /* Do any language-specific gimplification. 
*/ ret = lang_hooks.gimplify_expr (expr_p, pre_p, post_p); if (ret == GS_OK) { if (*expr_p == NULL_TREE) break; if (*expr_p != save_expr) continue; } else if (ret != GS_UNHANDLED) break; ret = GS_OK; switch (TREE_CODE (*expr_p)) { /* First deal with the special cases. */ case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: ret = gimplify_self_mod_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case ARRAY_REF: case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: case VIEW_CONVERT_EXPR: ret = gimplify_compound_lval (expr_p, pre_p, post_p, fallback ? fallback : fb_rvalue); break; case COND_EXPR: ret = gimplify_cond_expr (expr_p, pre_p, fallback); /* C99 code may assign to an array in a structure value of a conditional expression, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } break; case CALL_EXPR: ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none); /* C99 code may assign to an array in a structure returned from a function, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } break; case TREE_LIST: gcc_unreachable (); case COMPOUND_EXPR: ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none); break; case MODIFY_EXPR: case INIT_EXPR: ret = gimplify_modify_expr (expr_p, pre_p, post_p, fallback != fb_none); /* The distinction between MODIFY_EXPR and INIT_EXPR is no longer useful. 
*/ if (*expr_p && TREE_CODE (*expr_p) == INIT_EXPR) TREE_SET_CODE (*expr_p, MODIFY_EXPR); break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: ret = gimplify_boolean_expr (expr_p); break; case TRUTH_NOT_EXPR: TREE_OPERAND (*expr_p, 0) = gimple_boolify (TREE_OPERAND (*expr_p, 0)); ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case ADDR_EXPR: ret = gimplify_addr_expr (expr_p, pre_p, post_p); break; case VA_ARG_EXPR: ret = gimplify_va_arg_expr (expr_p, pre_p, post_p); break; case CONVERT_EXPR: case NOP_EXPR: if (IS_EMPTY_STMT (*expr_p)) { ret = GS_ALL_DONE; break; } if (VOID_TYPE_P (TREE_TYPE (*expr_p)) || fallback == fb_none) { /* Just strip a conversion to void (or in void context) and try again. */ *expr_p = TREE_OPERAND (*expr_p, 0); break; } ret = gimplify_conversion (expr_p); if (ret == GS_ERROR) break; if (*expr_p != save_expr) break; /* FALLTHRU */ case FIX_TRUNC_EXPR: case FIX_CEIL_EXPR: case FIX_FLOOR_EXPR: case FIX_ROUND_EXPR: /* unary_expr: ... | '(' cast ')' val | ... */ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case INDIRECT_REF: *expr_p = fold_indirect_ref (*expr_p); if (*expr_p != save_expr) break; /* else fall through. */ case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_reg, fb_rvalue); recalculate_side_effects (*expr_p); break; /* Constants need not be gimplified. */ case INTEGER_CST: case REAL_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: ret = GS_ALL_DONE; break; case CONST_DECL: /* If we require an lvalue, such as for ADDR_EXPR, retain the CONST_DECL node. Otherwise the decl is replaceable by its value. */ /* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. 
*/ if (fallback & fb_lvalue) ret = GS_ALL_DONE; else *expr_p = DECL_INITIAL (*expr_p); break; case DECL_EXPR: ret = gimplify_decl_expr (expr_p); break; case EXC_PTR_EXPR: /* FIXME make this a decl. */ ret = GS_ALL_DONE; break; case BIND_EXPR: ret = gimplify_bind_expr (expr_p, pre_p); break; case LOOP_EXPR: ret = gimplify_loop_expr (expr_p, pre_p); break; case SWITCH_EXPR: ret = gimplify_switch_expr (expr_p, pre_p); break; case EXIT_EXPR: ret = gimplify_exit_expr (expr_p); break; case GOTO_EXPR: /* If the target is not LABEL, then it is a computed jump and the target needs to be gimplified. */ if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL) ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p, NULL, is_gimple_val, fb_rvalue); break; case LABEL_EXPR: ret = GS_ALL_DONE; gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p)) == current_function_decl); break; case CASE_LABEL_EXPR: ret = gimplify_case_label_expr (expr_p); break; case RETURN_EXPR: ret = gimplify_return_expr (*expr_p, pre_p); break; case CONSTRUCTOR: /* Don't reduce this in place; let gimplify_init_constructor work its magic. Buf if we're just elaborating this for side effects, just gimplify any element that has side-effects. */ if (fallback == fb_none) { unsigned HOST_WIDE_INT ix; constructor_elt *ce; tree temp = NULL_TREE; for (ix = 0; VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (*expr_p), ix, ce); ix++) if (TREE_SIDE_EFFECTS (ce->value)) append_to_statement_list (ce->value, &temp); *expr_p = temp; ret = GS_OK; } /* C99 code may assign to an array in a constructed structure or union, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ else if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); lang_hooks.mark_addressable (*expr_p); } else ret = GS_ALL_DONE; break; /* The following are special cases that are not handled by the original GIMPLE grammar. 
*/ /* SAVE_EXPR nodes are converted into a GIMPLE identifier and eliminated. */ case SAVE_EXPR: ret = gimplify_save_expr (expr_p, pre_p, post_p); break; case BIT_FIELD_REF: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_lvalue, fb_either); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, MIN (r1, r2)); } break; case NON_LVALUE_EXPR: /* This should have been stripped above. */ gcc_unreachable (); case ASM_EXPR: ret = gimplify_asm_expr (expr_p, pre_p, post_p); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 0)); gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 1)); ret = GS_ALL_DONE; break; case CLEANUP_POINT_EXPR: ret = gimplify_cleanup_point_expr (expr_p, pre_p); break; case TARGET_EXPR: ret = gimplify_target_expr (expr_p, pre_p, post_p); break; case CATCH_EXPR: gimplify_to_stmt_list (&CATCH_BODY (*expr_p)); ret = GS_ALL_DONE; break; case EH_FILTER_EXPR: gimplify_to_stmt_list (&EH_FILTER_FAILURE (*expr_p)); ret = GS_ALL_DONE; break; case OBJ_TYPE_REF: { enum gimplify_status r0, r1; r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); } break; case LABEL_DECL: /* We get here when taking the address of a label. We mark the label as "forced"; meaning it can never be removed and it is a potential target for any computed goto. */ FORCED_LABEL (*expr_p) = 1; ret = GS_ALL_DONE; break; case STATEMENT_LIST: ret = gimplify_statement_list (expr_p, pre_p); break; case WITH_SIZE_EXPR: { gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p == &internal_post ? 
NULL : post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); } break; case VAR_DECL: case PARM_DECL: ret = gimplify_var_or_parm_decl (expr_p); break; case RESULT_DECL: /* When within an OpenMP context, notice uses of variables. */ if (gimplify_omp_ctxp) omp_notice_variable (gimplify_omp_ctxp, *expr_p, true); ret = GS_ALL_DONE; break; case SSA_NAME: /* Allow callbacks into the gimplifier during optimization. */ ret = GS_ALL_DONE; break; case OMP_PARALLEL: ret = gimplify_omp_parallel (expr_p, pre_p); break; case OMP_FOR: ret = gimplify_omp_for (expr_p, pre_p); break; case OMP_SECTIONS: case OMP_SINGLE: ret = gimplify_omp_workshare (expr_p, pre_p); break; case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: gimplify_to_stmt_list (&OMP_BODY (*expr_p)); break; case OMP_ATOMIC: ret = gimplify_omp_atomic (expr_p, pre_p); break; case OMP_RETURN: case OMP_CONTINUE: ret = GS_ALL_DONE; break; default: switch (TREE_CODE_CLASS (TREE_CODE (*expr_p))) { case tcc_comparison: /* Handle comparison of objects of non scalar mode aggregates with a call to memcmp. It would be nice to only have to do this for variable-sized objects, but then we'd have to allow the same nest of reference nodes we allow for MODIFY_EXPR and that's too complex. Compare scalar mode aggregates as scalar mode values. Using memcmp for them would be very inefficient at best, and is plain wrong if bitfields are involved. */ { tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1)); if (!AGGREGATE_TYPE_P (type)) goto expr_2; else if (TYPE_MODE (type) != BLKmode) ret = gimplify_scalar_mode_aggregate_compare (expr_p); else ret = gimplify_variable_sized_compare (expr_p); break; } /* If *EXPR_P does not need to be special-cased, handle it according to its class. 
*/ case tcc_unary: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); break; case tcc_binary: expr_2: { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); break; } case tcc_declaration: case tcc_constant: ret = GS_ALL_DONE; goto dont_recalculate; default: gcc_assert (TREE_CODE (*expr_p) == TRUTH_AND_EXPR || TREE_CODE (*expr_p) == TRUTH_OR_EXPR || TREE_CODE (*expr_p) == TRUTH_XOR_EXPR); goto expr_2; } recalculate_side_effects (*expr_p); dont_recalculate: break; } /* If we replaced *expr_p, gimplify again. */ if (ret == GS_OK && (*expr_p == NULL || *expr_p == save_expr)) ret = GS_ALL_DONE; } while (ret == GS_OK); /* If we encountered an error_mark somewhere nested inside, either stub out the statement or propagate the error back out. */ if (ret == GS_ERROR) { if (is_statement) *expr_p = NULL; goto out; } /* This was only valid as a return value from the langhook, which we handled. Make sure it doesn't escape from any other context. */ gcc_assert (ret != GS_UNHANDLED); if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p)) { /* We aren't looking for a value, and we don't have a valid statement. If it doesn't have side-effects, throw it away. */ if (!TREE_SIDE_EFFECTS (*expr_p)) *expr_p = NULL; else if (!TREE_THIS_VOLATILE (*expr_p)) { /* This is probably a _REF that contains something nested that has side effects. Recurse through the operands to find it. 
*/ enum tree_code code = TREE_CODE (*expr_p); switch (code) { case COMPONENT_REF: case REALPART_EXPR: case IMAGPART_EXPR: case VIEW_CONVERT_EXPR: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); break; case ARRAY_REF: case ARRAY_RANGE_REF: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, gimple_test_f, fallback); break; default: /* Anything else with side-effects must be converted to a valid statement before we get here. */ gcc_unreachable (); } *expr_p = NULL; } else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p)) && TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode) { /* Historically, the compiler has treated a bare reference to a non-BLKmode volatile lvalue as forcing a load. */ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p)); /* Normally, we do not want to create a temporary for a TREE_ADDRESSABLE type because such a type should not be copied by bitwise-assignment. However, we make an exception here, as all we are doing here is ensuring that we read the bytes that make up the type. We use create_tmp_var_raw because create_tmp_var will abort when given a TREE_ADDRESSABLE type. */ tree tmp = create_tmp_var_raw (type, "vol"); gimple_add_tmp_var (tmp); *expr_p = build2 (MODIFY_EXPR, type, tmp, *expr_p); } else /* We can't do anything useful with a volatile reference to an incomplete type, so just throw it away. Likewise for a BLKmode type, since any implicit inner load should already have been turned into an explicit one by the gimplification process. */ *expr_p = NULL; } /* If we are gimplifying at the statement level, we're done. Tack everything together and replace the original statement with the gimplified form. 
*/ if (fallback == fb_none || is_statement) { if (internal_pre || internal_post) { append_to_statement_list (*expr_p, &internal_pre); append_to_statement_list (internal_post, &internal_pre); annotate_all_with_locus (&internal_pre, input_location); *expr_p = internal_pre; } else if (!*expr_p) ; else if (TREE_CODE (*expr_p) == STATEMENT_LIST) annotate_all_with_locus (expr_p, input_location); else annotate_one_with_locus (*expr_p, input_location); goto out; } /* Otherwise we're gimplifying a subexpression, so the resulting value is interesting. */ /* If it's sufficiently simple already, we're done. Unless we are handling some post-effects internally; if that's the case, we need to copy into a temp before adding the post-effects to the tree. */ if (!internal_post && (*gimple_test_f) (*expr_p)) goto out; /* Otherwise, we need to create a new temporary for the gimplified expression. */ /* We can't return an lvalue if we have an internal postqueue. The object the lvalue refers to would (probably) be modified by the postqueue; we need to copy the value out first, which means an rvalue. */ if ((fallback & fb_lvalue) && !internal_post && is_gimple_addressable (*expr_p)) { /* An lvalue will do. Take the address of the expression, store it in a temporary, and replace the expression with an INDIRECT_REF of that temporary. */ tmp = build_fold_addr_expr (*expr_p); gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue); *expr_p = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (tmp)), tmp); } else if ((fallback & fb_rvalue) && is_gimple_formal_tmp_rhs (*expr_p)) { gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p))); /* An rvalue will do. Assign the gimplified expression into a new temporary TMP and replace the original expression with TMP. */ if (internal_post || (fallback & fb_lvalue)) /* The postqueue might change the value of the expression between the initialization and use of the temporary, so we can't use a formal temp. FIXME do we care? 
*/ *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p); else *expr_p = get_formal_tmp_var (*expr_p, pre_p); if (TREE_CODE (*expr_p) != SSA_NAME) DECL_GIMPLE_FORMAL_TEMP_P (*expr_p) = 1; } else { #ifdef ENABLE_CHECKING if (!(fallback & fb_mayfail)) { fprintf (stderr, "gimplification failed:\n"); print_generic_expr (stderr, *expr_p, 0); debug_tree (*expr_p); internal_error ("gimplification failed"); } #endif gcc_assert (fallback & fb_mayfail); /* If this is an asm statement, and the user asked for the impossible, don't die. Fail and let gimplify_asm_expr issue an error. */ ret = GS_ERROR; goto out; } /* Make sure the temporary matches our predicate. */ gcc_assert ((*gimple_test_f) (*expr_p)); if (internal_post) { annotate_all_with_locus (&internal_post, input_location); append_to_statement_list (internal_post, pre_p); } out: input_location = saved_location; return ret; } /* Look through TYPE for variable-sized objects and gimplify each such size that we find. Add to LIST_P any statements generated. */ void gimplify_type_sizes (tree type, tree *list_p) { tree field, t; if (type == NULL || type == error_mark_node) return; /* We first do the main variant, then copy into any other variants. */ type = TYPE_MAIN_VARIANT (type); /* Avoid infinite recursion. */ if (TYPE_SIZES_GIMPLIFIED (type)) return; TYPE_SIZES_GIMPLIFIED (type) = 1; switch (TREE_CODE (type)) { case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case REAL_TYPE: gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p); gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p); for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type); TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type); } break; case ARRAY_TYPE: /* These types may not have declarations, so handle them here. 
*/ gimplify_type_sizes (TREE_TYPE (type), list_p); gimplify_type_sizes (TYPE_DOMAIN (type), list_p); break; case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) if (TREE_CODE (field) == FIELD_DECL) { gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p); gimplify_type_sizes (TREE_TYPE (field), list_p); } break; case POINTER_TYPE: case REFERENCE_TYPE: /* We used to recurse on the pointed-to type here, which turned out to be incorrect because its definition might refer to variables not yet initialized at this point if a forward declaration is involved. It was actually useful for anonymous pointed-to types to ensure that the sizes evaluation dominates every possible later use of the values. Restricting to such types here would be safe since there is no possible forward declaration around, but would introduce an undesirable middle-end semantic to anonymity. We then defer to front-ends the responsibility of ensuring that the sizes are evaluated both early and late enough, e.g. by attaching artificial type declarations to the tree. */ break; default: break; } gimplify_one_sizepos (&TYPE_SIZE (type), list_p); gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p); for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t)) { TYPE_SIZE (t) = TYPE_SIZE (type); TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type); TYPE_SIZES_GIMPLIFIED (t) = 1; } } /* A subroutine of gimplify_type_sizes to make sure that *EXPR_P, a size or position, has had all of its SAVE_EXPRs evaluated. We add any required statements to STMT_P. */ void gimplify_one_sizepos (tree *expr_p, tree *stmt_p) { tree type, expr = *expr_p; /* We don't do anything if the value isn't there, is constant, or contains A PLACEHOLDER_EXPR. We also don't want to do anything if it's already a VAR_DECL. 
If it's a VAR_DECL from another function, the gimplifier will want to replace it with a new variable, but that will cause problems if this type is from outside the function. It's OK to have that here. */ if (expr == NULL_TREE || TREE_CONSTANT (expr) || TREE_CODE (expr) == VAR_DECL || CONTAINS_PLACEHOLDER_P (expr)) return; type = TREE_TYPE (expr); *expr_p = unshare_expr (expr); gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue); expr = *expr_p; /* Verify that we've an exact type match with the original expression. In particular, we do not wish to drop a "sizetype" in favour of a type of similar dimensions. We don't want to pollute the generic type-stripping code with this knowledge because it doesn't matter for the bulk of GENERIC/GIMPLE. It only matters that TYPE_SIZE_UNIT and friends retain their "sizetype-ness". */ if (TREE_TYPE (expr) != type && TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type)) { tree tmp; *expr_p = create_tmp_var (type, NULL); tmp = build1 (NOP_EXPR, type, expr); tmp = build2 (MODIFY_EXPR, type, *expr_p, tmp); if (EXPR_HAS_LOCATION (expr)) SET_EXPR_LOCUS (tmp, EXPR_LOCUS (expr)); else SET_EXPR_LOCATION (tmp, input_location); gimplify_and_add (tmp, stmt_p); } } #ifdef ENABLE_CHECKING /* Compare types A and B for a "close enough" match. */ static bool cpt_same_type (tree a, tree b) { if (lang_hooks.types_compatible_p (a, b)) return true; /* ??? The C++ FE decomposes METHOD_TYPES to FUNCTION_TYPES and doesn't link them together. This routine is intended to catch type errors that will affect the optimizers, and the optimizers don't add new dereferences of function pointers, so ignore it. */ if ((TREE_CODE (a) == FUNCTION_TYPE || TREE_CODE (a) == METHOD_TYPE) && (TREE_CODE (b) == FUNCTION_TYPE || TREE_CODE (b) == METHOD_TYPE)) return true; /* ??? The C FE pushes type qualifiers after the fact into the type of the element from the type of the array. See build_unary_op's handling of ADDR_EXPR. 
This seems wrong -- if we were going to do this, we should have done it when creating the variable in the first place. Alternately, why aren't the two array types made variants? */ if (TREE_CODE (a) == ARRAY_TYPE && TREE_CODE (b) == ARRAY_TYPE) return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b)); /* And because of those, we have to recurse down through pointers. */ if (POINTER_TYPE_P (a) && POINTER_TYPE_P (b)) return cpt_same_type (TREE_TYPE (a), TREE_TYPE (b)); return false; } /* Check for some cases of the front end missing cast expressions. The type of a dereference should correspond to the pointer type; similarly the type of an address should match its object. */ static tree check_pointer_types_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED, void *data ATTRIBUTE_UNUSED) { tree t = *tp; tree ptype, otype, dtype; switch (TREE_CODE (t)) { case INDIRECT_REF: case ARRAY_REF: otype = TREE_TYPE (t); ptype = TREE_TYPE (TREE_OPERAND (t, 0)); dtype = TREE_TYPE (ptype); gcc_assert (cpt_same_type (otype, dtype)); break; case ADDR_EXPR: ptype = TREE_TYPE (t); otype = TREE_TYPE (TREE_OPERAND (t, 0)); dtype = TREE_TYPE (ptype); if (!cpt_same_type (otype, dtype)) { /* &array is allowed to produce a pointer to the element, rather than a pointer to the array type. We must allow this in order to properly represent assigning the address of an array in C into pointer to the element type. */ gcc_assert (TREE_CODE (otype) == ARRAY_TYPE && POINTER_TYPE_P (ptype) && cpt_same_type (TREE_TYPE (otype), dtype)); break; } break; default: return NULL_TREE; } return NULL_TREE; } #endif /* Gimplify the body of statements pointed to by BODY_P. FNDECL is the function decl containing BODY. */ void gimplify_body (tree *body_p, tree fndecl, bool do_parms) { location_t saved_location = input_location; tree body, parm_stmts; timevar_push (TV_TREE_GIMPLIFY); gcc_assert (gimplify_ctxp == NULL); push_gimplify_context (); /* Unshare most shared trees in the body and in that of any nested functions. 
It would seem we don't have to do this for nested functions because they are supposed to be output and then the outer function gimplified first, but the g++ front end doesn't always do it that way. */ unshare_body (body_p, fndecl); unvisit_body (body_p, fndecl); /* Make sure input_location isn't set to something wierd. */ input_location = DECL_SOURCE_LOCATION (fndecl); /* Resolve callee-copies. This has to be done before processing the body so that DECL_VALUE_EXPR gets processed correctly. */ parm_stmts = do_parms ? gimplify_parameters () : NULL; /* Gimplify the function's body. */ gimplify_stmt (body_p); body = *body_p; if (!body) body = alloc_stmt_list (); else if (TREE_CODE (body) == STATEMENT_LIST) { tree t = expr_only (*body_p); if (t) body = t; } /* If there isn't an outer BIND_EXPR, add one. */ if (TREE_CODE (body) != BIND_EXPR) { tree b = build3 (BIND_EXPR, void_type_node, NULL_TREE, NULL_TREE, NULL_TREE); TREE_SIDE_EFFECTS (b) = 1; append_to_statement_list_force (body, &BIND_EXPR_BODY (b)); body = b; } /* If we had callee-copies statements, insert them at the beginning of the function. */ if (parm_stmts) { append_to_statement_list_force (BIND_EXPR_BODY (body), &parm_stmts); BIND_EXPR_BODY (body) = parm_stmts; } /* Unshare again, in case gimplification was sloppy. */ unshare_all_trees (body); *body_p = body; pop_gimplify_context (body); gcc_assert (gimplify_ctxp == NULL); #ifdef ENABLE_CHECKING walk_tree (body_p, check_pointer_types_r, NULL, NULL); #endif timevar_pop (TV_TREE_GIMPLIFY); input_location = saved_location; } /* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL node for the function we want to gimplify. 
*/ void gimplify_function_tree (tree fndecl) { tree oldfn, parm, ret; oldfn = current_function_decl; current_function_decl = fndecl; cfun = DECL_STRUCT_FUNCTION (fndecl); if (cfun == NULL) allocate_struct_function (fndecl); for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = TREE_CHAIN (parm)) { /* Preliminarily mark non-addressed complex variables as eligible for promotion to gimple registers. We'll transform their uses as we find them. */ if (TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE && !TREE_THIS_VOLATILE (parm) && !needs_to_live_in_memory (parm)) DECL_COMPLEX_GIMPLE_REG_P (parm) = 1; } ret = DECL_RESULT (fndecl); if (TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE && !needs_to_live_in_memory (ret)) DECL_COMPLEX_GIMPLE_REG_P (ret) = 1; gimplify_body (&DECL_SAVED_TREE (fndecl), fndecl, true); /* If we're instrumenting function entry/exit, then prepend the call to the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to catch the exit hook. */ /* ??? Add some way to ignore exceptions for this TFE. */ if (flag_instrument_function_entry_exit && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl) && !flag_instrument_functions_exclude_p (fndecl)) { tree tf, x, bind; tf = build2 (TRY_FINALLY_EXPR, void_type_node, NULL, NULL); TREE_SIDE_EFFECTS (tf) = 1; x = DECL_SAVED_TREE (fndecl); append_to_statement_list (x, &TREE_OPERAND (tf, 0)); x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_EXIT]; x = build_function_call_expr (x, NULL); append_to_statement_list (x, &TREE_OPERAND (tf, 1)); bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_ENTER]; x = build_function_call_expr (x, NULL); append_to_statement_list (x, &BIND_EXPR_BODY (bind)); append_to_statement_list (tf, &BIND_EXPR_BODY (bind)); DECL_SAVED_TREE (fndecl) = bind; } current_function_decl = oldfn; cfun = oldfn ? DECL_STRUCT_FUNCTION (oldfn) : NULL; } /* Expands EXPR to list of gimple statements STMTS. 
If SIMPLE is true, force the result to be either ssa_name or an invariant, otherwise just force it to be a rhs expression. If VAR is not NULL, make the base variable of the final destination be VAR if suitable. */ tree force_gimple_operand (tree expr, tree *stmts, bool simple, tree var) { tree t; enum gimplify_status ret; gimple_predicate gimple_test_f; *stmts = NULL_TREE; if (is_gimple_val (expr)) return expr; gimple_test_f = simple ? is_gimple_val : is_gimple_reg_rhs; push_gimplify_context (); gimplify_ctxp->into_ssa = in_ssa_p; if (var) expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr); ret = gimplify_expr (&expr, stmts, NULL, gimple_test_f, fb_rvalue); gcc_assert (ret != GS_ERROR); if (referenced_vars) { for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t)) add_referenced_var (t); } pop_gimplify_context (NULL); return expr; } /* Invokes force_gimple_operand for EXPR with parameters SIMPLE_P and VAR. If some statements are produced, emits them before BSI. */ tree force_gimple_operand_bsi (block_stmt_iterator *bsi, tree expr, bool simple_p, tree var) { tree stmts; expr = force_gimple_operand (expr, &stmts, simple_p, var); if (stmts) bsi_insert_before (bsi, stmts, BSI_SAME_STMT); return expr; } #include "gt-gimplify.h"
matmul.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ void matmul(level_type * level, double *C, int * id_A, int * id_B, int rows, int cols, int A_equals_B_transpose){ // *id_A = m vector_id's (conceptually pointers to the rows of a m x level->num_my_boxes*volume matrix) // *id_B = n vector_id's (conceptually pointers to the columns of a level->num_my_boxes*volume matrix x n) // *C is a mxn matrix where C[rows][cols] = dot(id_A[rows],id_B[cols]) // FIX, id_A and id_B are likely the same and thus C[][] will be symmetric (modulo missing row?) // if(A_equals_B_transpose && (cols>=rows)) then use id_B and only run for nn>=mm // common case for s-step Krylov methods // C_is_symmetric && cols< rows (use id_A) int mm,nn; double _timeStart = getTime(); // FIX... rather than performing an all_reduce on the essentially symmetric [G,g], do the all_reduce on the upper triangle and then duplicate (saves BW) #ifdef _OPENMP #pragma omp parallel for schedule(static,1) collapse(2) #endif for(mm=0;mm<rows;mm++){ for(nn=0;nn<cols;nn++){ if(nn>=mm){ // upper triangular int box; double a_dot_b_level = 0.0; for(box=0;box<level->num_my_boxes;box++){ int i,j,k; const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int ghosts = level->my_boxes[box].ghosts; const int dim = level->my_boxes[box].dim; double * __restrict__ grid_a = level->my_boxes[box].vectors[id_A[mm]] + ghosts*(1+jStride+kStride); // i.e. 
[0] = first non ghost zone point double * __restrict__ grid_b = level->my_boxes[box].vectors[id_B[nn]] + ghosts*(1+jStride+kStride); double a_dot_b_box = 0.0; for(k=0;k<dim;k++){ for(j=0;j<dim;j++){ for(i=0;i<dim;i++){ int ijk = i + j*jStride + k*kStride; a_dot_b_box += grid_a[ijk]*grid_b[ijk]; }}} a_dot_b_level+=a_dot_b_box; } C[mm*cols + nn] = a_dot_b_level; // C[mm][nn] if((mm<cols)&&(nn<rows)){C[nn*cols + mm] = a_dot_b_level;}// C[nn][mm] } }} level->timers.blas3 += (double)(getTime()-_timeStart); #ifdef USE_MPI double *send_buffer = (double*)malloc(rows*cols*sizeof(double)); for(mm=0;mm<rows;mm++){ for(nn=0;nn<cols;nn++){ send_buffer[mm*cols + nn] = C[mm*cols + nn]; }} double _timeStartAllReduce = getTime(); MPI_Allreduce(send_buffer,C,rows*cols,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE); double _timeEndAllReduce = getTime(); level->timers.collectives += (double)(_timeEndAllReduce-_timeStartAllReduce); free(send_buffer); #endif }
elemwise_binary_scalar_op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file elemwise_binary_scalar_op.h
 * \brief Function definition of elementwise binary scalar operators
 */
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_

#include <mxnet/operator_util.h>
#include <limits>
#include <vector>
#include <utility>
#include <string>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"

namespace mxnet {
namespace op {

/*! \brief Parameter for "tensor op scalar" operators: the scalar value and
 *  whether the user supplied it as an integer (drives output dtype promotion).
 */
struct NumpyBinaryScalarParam : public dmlc::Parameter<NumpyBinaryScalarParam> {
  double scalar;  // scalar operand, always stored as double
  bool is_int;    // true when the scalar was an int literal
  DMLC_DECLARE_PARAMETER(NumpyBinaryScalarParam) {
    DMLC_DECLARE_FIELD(scalar).set_default(1).describe("Scalar input value");
    DMLC_DECLARE_FIELD(is_int).set_default(true).describe(
        "Indicate whether scalar input is int type");
  }

  // Serializes both fields into string form for attribute dictionaries.
  // max_digits10 guarantees the double round-trips exactly through text.
  // NOTE(review): uses std::ostringstream / std::setprecision but <sstream>
  // and <iomanip> are not included here — presumably pulled in transitively;
  // verify against the including translation units.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream scalar_s, is_int_s;
    scalar_s << std::setprecision(std::numeric_limits<double>::max_digits10) << scalar;
    is_int_s << is_int;
    (*dict)["scalar"] = scalar_s.str();
    (*dict)["is_int"] = is_int_s.str();
  }
};

/*! \brief Type inference for binary-scalar ops: int tensor + float scalar
 *  promotes to float64; bool tensor promotes to int64 or float64 depending
 *  on the scalar; otherwise output dtype mirrors the input dtype.
 *  Returns true when the output dtype was resolved.
 */
inline bool NumpyBinaryScalarType(const nnvm::NodeAttrs& attrs,
                                  std::vector<int>* in_attrs,
                                  std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
  bool scalar_is_int = param.is_int;
  if (common::is_int(in_attrs->at(0)) && !scalar_is_int) {
    // int tensor op float scalar -> float64 result
    TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat64);
  } else if (in_attrs->at(0) == mshadow::kBool) {
    // bool tensor promotes by the scalar's kind
    TYPE_ASSIGN_CHECK(*out_attrs, 0, scalar_is_int ? mshadow::kInt64 : mshadow::kFloat64);
  } else {
    // same dtype in and out; also back-propagate the output dtype to the input
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  }
  return out_attrs->at(0) != -1;
}

/*! \brief Elementwise "tensor op scalar" operator implementations for dense
 *  and sparse (row-sparse / CSR) storage, including dense-result paths where
 *  implicit zeros map to OP(0, scalar).
 */
class BinaryScalarOp : public UnaryOp {
  /*! \brief Tensor operation against a scalar with a dense result.
   *  Row-sparse input, CPU: rows absent from the rsp input are filled with
   *  OP(0, alpha); present rows get OP(value, alpha).  Work is batched over
   *  runs of contiguous dense (missing) and sparse (present) rows.
   */
  template <typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<cpu>* stream,
                                      const nnvm::NodeAttrs& attrs,
                                      const OpContext& ctx,
                                      const NDArray& input,
                                      const OpReqType req,
                                      const NDArray& output) {
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    const double alpha = param.scalar;
    CHECK_EQ(output.shape(), input.shape());
    const int64_t row_count     = output.shape()[0];
    const int64_t items_per_row = output.shape().Size() / row_count;
    // Value that every implicit-zero element maps to.
    const DType result_for_zero = OP::Map(DType(0), DType(alpha));
    mshadow::Tensor<cpu, 1, DType> input_data  = input.data().FlatTo1D<cpu, DType>(stream);
    mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
    const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
    if (sparse_row_count != row_count) {
      // Some rows are missing: walk output rows, alternating between
      // contiguous "dense" (absent) blocks and contiguous sparse blocks.
      mshadow::Tensor<cpu, 1, IType> row_indexes =
          input.aux_data(rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
      int64_t input_iter = 0;   // position in the stored (sparse) rows
      int64_t output_row = 0;   // position in the dense output rows
      IType next_input_row = 0;
      while (output_row < row_count) {
        next_input_row =
            input_iter < sparse_row_count ? int64_t(row_indexes[input_iter]) : row_count;
        // Split up into blocks of contiguous data and do those together
        // Do contiguous dense blocks
        const int64_t dense_block_count = next_input_row - output_row;
        if (dense_block_count > 0) {
          // Fill all absent rows in this block with result_for_zero.
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
                stream,
                items_per_row * dense_block_count,
                output_data.dptr_ + items_per_row * output_row,
                result_for_zero);
          });
          output_row += dense_block_count;
          continue;
        }
        // Do contiguous sparse blocks
        int64_t next_non_contiguous_sparse = input_iter;
        while (next_non_contiguous_sparse < sparse_row_count - 1) {
          if (row_indexes[next_non_contiguous_sparse + 1] !=
              row_indexes[next_non_contiguous_sparse] + 1) {
            break;
          }
          ++next_non_contiguous_sparse;
        }
        const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
        if (sparse_block_count > 0) {
          // Apply OP elementwise over the run of stored rows in one launch.
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
                stream,
                items_per_row * sparse_block_count,
                &output_data.dptr_[items_per_row * output_row],
                &input_data.dptr_[items_per_row * input_iter],
                DType(alpha));
          });
          output_row += sparse_block_count;
          input_iter += sparse_block_count;
          continue;
        }
      }
    } else {
      // All rows exist (eventually we don't have to do complex
      // things to call GPU kernels because we don't need to access row indices)
      MXNET_ASSIGN_REQ_SWITCH(req, Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
            stream, items_per_row * row_count, output_data.dptr_, input_data.dptr_, DType(alpha));
      });
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result.
   *  Row-sparse input, GPU: no implementation — aborts at runtime.
   */
  template <typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<gpu>* stream,
                                      const nnvm::NodeAttrs& attrs,
                                      const OpContext& ctx,
                                      const NDArray& input,
                                      const OpReqType req,
                                      const NDArray& output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  /*! \brief Tensor operation against a scalar with a dense result.
   *  CSR input, CPU: pre-fills the dense output with OP(0, alpha), then
   *  overwrites the positions of stored elements row by row (outer loop is
   *  OMP-parallel; the inner per-row loop additionally goes parallel for
   *  rows with > 1000 stored items).
   */
  template <typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<cpu>* stream,
                                      const nnvm::NodeAttrs& attrs,
                                      const OpContext& ctx,
                                      const NDArray& input,
                                      const OpReqType req,
                                      const NDArray& output) {
    CHECK_EQ(output.shape(), input.shape());
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    const double alpha          = param.scalar;
    const DType dense_fill_val  = OP::Map(DType(0), DType(alpha));
    const TBlob column_indexes  = input.aux_data(csr::kIdx);
    const size_t item_count     = column_indexes.Size();
    // Pre-fill dense with 0-input/output value
    FillDense<DType>(
        stream, output.shape().Size(), dense_fill_val, req, output.data().dptr<DType>());
    mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
    if (item_count) {
      const DType* in                 = input.data().dptr<DType>();
      const IType* column_indexes_ptr = column_indexes.dptr<IType>();
      const auto row_count            = static_cast<size_t>(input.shape()[0]);
      const TBlob row_starts          = input.aux_data(csr::kIndPtr);
      const CType* row_starts_ptr     = row_starts.dptr<CType>();
#pragma omp parallel for
      for (int i = 0; i < static_cast<int>(row_count); ++i) {
        const bool last_row = i == static_cast<int>(row_count) - 1;
        // Split up into blocks of contiguous data and do those together
        const size_t row_item_start_iter = row_starts_ptr[i];
        // Last row's extent comes from item_count since indptr has no i+1 entry.
        const size_t input_items_this_row =
            !last_row ? static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter
                      : item_count - row_item_start_iter;
        if (input_items_this_row) {
          const IType* this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
          const DType* row_data_start          = in + row_item_start_iter;
          DType* output_this_row               = out[i].dptr_;
          // More overhead to use OMP for small loops, so don't
          if (input_items_this_row > 1000) {
#pragma omp parallel for
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col      = this_row_column_indexes[j];
              const DType val      = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          } else {
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col      = this_row_column_indexes[j];
              const DType val      = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          }
        }
      }
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result.
   *  CSR input, GPU: no implementation — aborts at runtime.
   */
  template <typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<gpu>* stream,
                                      const nnvm::NodeAttrs& attrs,
                                      const OpContext& ctx,
                                      const NDArray& input,
                                      const OpReqType req,
                                      const NDArray& output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  /*! \brief Dispatch a sparse-input / dense-output computation by the input's
   *  storage type (row-sparse or CSR); any other storage type is fatal.
   *  NOTE(review): `output` is taken by value — presumably intentional since
   *  NDArray appears to be a cheap handle type; confirm against NDArray.
   */
  template <typename xpu, typename OP, typename DType, typename IType>
  static void ComputeExDenseResult(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const NDArray& input,
                                   const OpReqType req,
                                   const NDArray output) {
    mshadow::Stream<xpu>* stream = ctx.get_stream<xpu>();
    CHECK_EQ(output.storage_type(), kDefaultStorage);
    switch (input.storage_type()) {
      case kRowSparseStorage: {
        ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
        break;
      }
      case kCSRStorage: {
        // CSR needs the indptr's index dtype as an extra template parameter.
        MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
          ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
        });
        break;
      }
      default:
        CHECK(false) << "Unsupported sparse storage type";
        break;
    }
  }

 public:
  /*! \brief Dense CPU kernel: out[i] = OP(in[i], scalar).  When dtype
   *  promotion applies (int tensor with float scalar, or bool tensor), the
   *  input is first cast into temp workspace of the output dtype.
   */
  template <typename OP>
  static void Compute_(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       mshadow::Stream<cpu>* s,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    TBlob temp_tblob;
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    bool scalar_is_int = param.is_int;
    const double alpha = param.scalar;
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      if ((common::is_int(inputs[0].type_flag_) && !scalar_is_int) ||
          (inputs[0].type_flag_ == kBool)) {
        // Input dtype differs from output dtype: cast into temp space first.
        Tensor<cpu, 1, DType> temp_tensor =
            ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(inputs[0].Size()), s);
        temp_tblob = TBlob(temp_tensor);
        CastCompute<cpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
      } else {
        temp_tblob = inputs[0];
      }
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
            s, inputs[0].Size(), outputs[0].dptr<DType>(), temp_tblob.dptr<DType>(), DType(alpha));
      });
    });
  }

  /*! \brief FCompute entry point: fetch the stream and forward to Compute_. */
  template <typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
    mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
    Compute_<OP>(attrs, ctx, s, inputs, req, outputs);
  }

  /*! \brief Integer-only variant: no dtype promotion, kernel launched
   *  directly over the integer dtypes.
   */
  template <typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs& attrs,
                         const OpContext& ctx,
                         const std::vector<TBlob>& inputs,
                         const std::vector<OpReqType>& req,
                         const std::vector<TBlob>& outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    const double alpha = param.scalar;
    MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
            s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
      });
    });
  }

  /*! \brief Comparison/logic variant: output is bool.  An int tensor compared
   *  against a float scalar is first cast to double in temp workspace.
   */
  template <typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    bool scalar_is_int = param.is_int;
    const double alpha = param.scalar;
    TBlob temp_tblob;
    if (common::is_int(inputs[0].type_flag_) && !scalar_is_int) {
      // Promote the int tensor to double so the comparison sees exact values.
      Tensor<xpu, 1, double> temp_tensor =
          ctx.requested[0].get_space_typed<xpu, 1, double>(Shape1(inputs[0].Size()), s);
      temp_tblob = TBlob(temp_tensor);
      CastCompute<xpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
    } else {
      temp_tblob = inputs[0];
    }
    MSHADOW_TYPE_SWITCH_EXT_WITH_BOOL(temp_tblob.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
            s, inputs[0].Size(), outputs[0].dptr<bool>(), temp_tblob.dptr<DType>(), DType(alpha));
      });
    });
  }

  /*! \brief FComputeEx entry point: sparse-in/sparse-out maps onto the dense
   *  kernel over stored values; sparse-in/dense-out uses the dense-result
   *  paths; anything else is logged as unimplemented.
   */
  template <typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<NDArray>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<NDArray>& outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype  = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else if (out_stype == kDefaultStorage &&
               (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
        MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
          ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
        });
      });
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief FComputeEx entry point for logic ops: only same-storage
   *  sparse->sparse is supported; other combinations log unimplemented.
   */
  template <typename xpu, typename OP>
  static void LogicComputeEx(const nnvm::NodeAttrs& attrs,
                             const OpContext& ctx,
                             const std::vector<NDArray>& inputs,
                             const std::vector<OpReqType>& req,
                             const std::vector<NDArray>& outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype  = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Backward CPU kernel: in_grad = out_grad * d OP(x, scalar)/dx,
   *  via backward_grad_tuned.  inputs[0] is the output gradient; inputs[1]
   *  is the forward input.
   */
  template <typename OP>
  static void Backward_(const nnvm::NodeAttrs& attrs,
                        mshadow::Stream<cpu>* s,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    const double alpha = param.scalar;
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet::op::mxnet_op::Kernel<
            mxnet::op::mxnet_op::op_with_req<mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>,
            cpu>::Launch(s,
                         inputs[0].Size(),
                         outputs[0].dptr<DType>(),
                         inputs[0].dptr<DType>(),
                         inputs[1].dptr<DType>(),
                         DType(alpha));
      });
    });
  }

  /*! \brief Backward entry point: fetch the stream and forward to Backward_. */
  template <typename xpu, typename OP>
  static void Backward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    Backward_<OP>(attrs, s, inputs, req, outputs);
  }
};

/*! \brief Registers a 1-in/1-out binary-scalar operator with shape/type
 *  inference, in-place option, temp-space resource, and the standard
 *  NumpyBinaryScalarParam arguments.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                   \
  NNVM_REGISTER_OP(name)                                              \
      .set_num_inputs(1)                                              \
      .set_num_outputs(1)                                             \
      .set_attr_parser(ParamParser<NumpyBinaryScalarParam>)           \
      .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
      .set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType) \
      .set_attr<nnvm::FInplaceOption>("FInplaceOption",               \
                                      [](const NodeAttrs& attrs) {    \
                                        return std::vector<std::pair<int, int> >{{0, 0}}; \
                                      })                              \
      .set_attr<FResourceRequest>(                                    \
          "FResourceRequest",                                         \
          [](const NodeAttrs& attrs) {                                \
            return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \
          })                                                          \
      .add_argument("data", "NDArray-or-Symbol", "source input")      \
      .add_arguments(NumpyBinaryScalarParam::__FIELDS__())

#if MXNET_USE_CUDA

/*! \brief Runtime-compiled (RTC) forward functor for binary-scalar ops on
 *  GPU; OP names the generated kernel.  Definitions live in a .cu file.
 */
struct BinaryScalarRTCCompute {
  std::string OP;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<NDArray>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<NDArray>& outputs);
};

/*! \brief Runtime-compiled (RTC) backward functor for binary-scalar ops. */
struct BinaryScalarRTCBackward {
  std::string OP;

  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};

#endif  // MXNET_USE_CUDA

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
data.c
#include "data.h" #include "utils.h" #include "image.h" #include "dark_cuda.h" #include "box.h" #include "http_stream.h" #include <stdio.h> #include <stdlib.h> #include <string.h> extern int check_mistakes; #define NUMCHARS 37 pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; list *get_paths(char *filename) { char *path; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); list *lines = make_list(); while((path=fgetl(file))){ list_insert(lines, path); } fclose(file); return lines; } /* char **get_random_paths_indexes(char **paths, int n, int m, int *indexes) { char **random_paths = calloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); for(i = 0; i < n; ++i){ int index = random_gen()%m; indexes[i] = index; random_paths[i] = paths[index]; if(i == 0) printf("%s\n", paths[index]); } pthread_mutex_unlock(&mutex); return random_paths; } */ char **get_sequential_paths(char **paths, int n, int m, int mini_batch, int augment_speed) { int speed = rand_int(1, augment_speed); if (speed < 1) speed = 1; char** sequentia_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d, mini_batch = %d \n", n, mini_batch); unsigned int *start_time_indexes = (unsigned int *)xcalloc(mini_batch, sizeof(unsigned int)); for (i = 0; i < mini_batch; ++i) { start_time_indexes[i] = random_gen() % m; //printf(" start_time_indexes[i] = %u, ", start_time_indexes[i]); } for (i = 0; i < n; ++i) { do { int time_line_index = i % mini_batch; unsigned int index = start_time_indexes[time_line_index] % m; start_time_indexes[time_line_index] += speed; //int index = random_gen() % m; sequentia_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf(" index = %u - grp: %s \n", index, paths[index]); if (strlen(sequentia_paths[i]) <= 4) printf(" Very small path to the image: %s \n", sequentia_paths[i]); } while (strlen(sequentia_paths[i]) == 0); } free(start_time_indexes); pthread_mutex_unlock(&mutex); return sequentia_paths; } char 
**get_random_paths(char **paths, int n, int m) { char** random_paths = (char**)xcalloc(n, sizeof(char*)); int i; pthread_mutex_lock(&mutex); //printf("n = %d \n", n); for(i = 0; i < n; ++i){ do { int index = random_gen() % m; random_paths[i] = paths[index]; //if(i == 0) printf("%s\n", paths[index]); //printf("grp: %s\n", paths[index]); if (strlen(random_paths[i]) <= 4) printf(" Very small path to the image: %s \n", random_paths[i]); } while (strlen(random_paths[i]) == 0); } pthread_mutex_unlock(&mutex); return random_paths; } char **find_replace_paths(char **paths, int n, char *find, char *replace) { char** replace_paths = (char**)xcalloc(n, sizeof(char*)); int i; for(i = 0; i < n; ++i){ char replaced[4096]; find_replace(paths[i], find, replace, replaced); replace_paths[i] = copy_string(replaced); } return replace_paths; } matrix load_image_paths_gray(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image(paths[i], w, h, 3); image gray = grayscale_image(im); free_image(im); im = gray; X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int dontuse_opencv) { int i; matrix X; X.rows = n; X.vals = (float**)xcalloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ int size = w > h ? 
w : h; image im; if(dontuse_opencv) im = load_image_stb_resize(paths[i], 0, 0, 3); else im = load_image_color(paths[i], 0, 0); image crop = random_augment_image(im, angle, aspect, min, max, size); int flip = use_flip ? random_gen() % 2 : 0; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); image sized = resize_image(crop, w, h); //show_image(im, "orig"); //show_image(sized, "sized"); //show_image(sized, paths[i]); //wait_until_press_key_cv(); //printf("w = %d, h = %d \n", sized.w, sized.h); free_image(im); free_image(crop); X.vals[i] = sized.data; X.cols = sized.h*sized.w*sized.c; } return X; } box_label *read_boxes(char *filename, int *n) { box_label* boxes = (box_label*)xcalloc(1, sizeof(box_label)); FILE *file = fopen(filename, "r"); if (!file) { printf("Can't open label file. (This can be normal only if you use MSCOCO): %s \n", filename); //file_error(filename); FILE* fw = fopen("bad.list", "a"); fwrite(filename, sizeof(char), strlen(filename), fw); char *new_line = "\n"; fwrite(new_line, sizeof(char), strlen(new_line), fw); fclose(fw); if (check_mistakes) { printf("\n Error in read_boxes() \n"); getchar(); } *n = 0; return boxes; } float x, y, h, w; int id; int count = 0; while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ boxes = (box_label*)xrealloc(boxes, (count + 1) * sizeof(box_label)); boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = random_gen()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; 
boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } if ((boxes[i].x + boxes[i].w / 2) < 0 || (boxes[i].y + boxes[i].h / 2) < 0 || (boxes[i].x - boxes[i].w / 2) > 1 || (boxes[i].y - boxes[i].h / 2) > 1) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 30; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); 
randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .001 || h < .001) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } int fill_truth_detection(const char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy, int net_w, int net_h) { char labelpath[4096]; replace_image_to_label(path, labelpath); int count = 0; int i; box_label *boxes = read_boxes(labelpath, &count); int min_w_h = 0; float lowest_w = 1.F / net_w; float lowest_h = 1.F / net_h; randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); if (count > num_boxes) count = num_boxes; float x, y, w, h; int id; int sub = 0; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; // not detect small objects //if ((w < 0.001F || h < 0.001F)) continue; // if truth (box for object) is smaller than 1x1 pix char buff[256]; if (id >= classes) { printf("\n Wrong annotation: class_id = %d. But class_id should be [from 0 to %d], file: %s \n", id, (classes-1), labelpath); sprintf(buff, "echo %s \"Wrong annotation: class_id = %d. 
But class_id should be [from 0 to %d]\" >> bad_label.list", labelpath, id, (classes-1)); system(buff); if (check_mistakes) getchar(); ++sub; continue; } if ((w < lowest_w || h < lowest_h)) { //sprintf(buff, "echo %s \"Very small object: w < lowest_w OR h < lowest_h\" >> bad_label.list", labelpath); //system(buff); ++sub; continue; } if (x == 999999 || y == 999999) { printf("\n Wrong annotation: x = 0, y = 0, < 0 or > 1, file: %s \n", labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = 0 or y = 0\" >> bad_label.list", labelpath); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (x <= 0 || x > 1 || y <= 0 || y > 1) { printf("\n Wrong annotation: x = %f, y = %f, file: %s \n", x, y, labelpath); sprintf(buff, "echo %s \"Wrong annotation: x = %f, y = %f\" >> bad_label.list", labelpath, x, y); system(buff); ++sub; if (check_mistakes) getchar(); continue; } if (w > 1) { printf("\n Wrong annotation: w = %f, file: %s \n", w, labelpath); sprintf(buff, "echo %s \"Wrong annotation: w = %f\" >> bad_label.list", labelpath, w); system(buff); w = 1; if (check_mistakes) getchar(); } if (h > 1) { printf("\n Wrong annotation: h = %f, file: %s \n", h, labelpath); sprintf(buff, "echo %s \"Wrong annotation: h = %f\" >> bad_label.list", labelpath, h); system(buff); h = 1; if (check_mistakes) getchar(); } if (x == 0) x += lowest_w; if (y == 0) y += lowest_h; truth[(i-sub)*5+0] = x; truth[(i-sub)*5+1] = y; truth[(i-sub)*5+2] = w; truth[(i-sub)*5+3] = h; truth[(i-sub)*5+4] = id; if (min_w_h == 0) min_w_h = w*net_w; if (min_w_h > w*net_w) min_w_h = w*net_w; if (min_w_h > h*net_h) min_w_h = h*net_h; } free(boxes); return min_w_h; } void print_letters(float *pred, int n) { int i; for(i = 0; i < n; ++i){ int index = max_index(pred+i*NUMCHARS, NUMCHARS); printf("%c", int_to_alphanum(index)); } printf("\n"); } void fill_truth_captcha(char *path, int n, float *truth) { char *begin = strrchr(path, '/'); ++begin; int i; for(i = 0; i < strlen(begin) && i < n && begin[i] != 
'.'; ++i){ int index = alphanum_to_int(begin[i]); if(index > 35) printf("Bad %c\n", begin[i]); truth[i*NUMCHARS+index] = 1; } for(;i < n; ++i){ truth[i*NUMCHARS + NUMCHARS-1] = 1; } } data load_data_captcha(char **paths, int n, int m, int k, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.y = make_matrix(n, k*NUMCHARS); int i; for(i = 0; i < n; ++i){ fill_truth_captcha(paths[i], k, d.y.vals[i]); } if(m) free(paths); return d; } data load_data_captcha_encode(char **paths, int n, int m, int w, int h) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_paths(paths, n, w, h); d.X.cols = 17100; d.y = d.X; if(m) free(paths); return d; } void fill_truth(char *path, char **labels, int k, float *truth) { int i; memset(truth, 0, k*sizeof(float)); int count = 0; for(i = 0; i < k; ++i){ if(strstr(path, labels[i])){ truth[i] = 1; ++count; } } if (count != 1) { printf("Too many or too few labels: %d, %s\n", count, path); count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { printf("\t label %d: %s \n", count, labels[i]); count++; } } } } void fill_truth_smooth(char *path, char **labels, int k, float *truth, float label_smooth_eps) { int i; memset(truth, 0, k * sizeof(float)); int count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { truth[i] = (1 - label_smooth_eps); ++count; } else { truth[i] = label_smooth_eps / (k - 1); } } if (count != 1) { printf("Too many or too few labels: %d, %s\n", count, path); count = 0; for (i = 0; i < k; ++i) { if (strstr(path, labels[i])) { printf("\t label %d: %s \n", count, labels[i]); count++; } } } } void fill_hierarchy(float *truth, int k, tree *hierarchy) { int j; for(j = 0; j < k; ++j){ if(truth[j]){ int parent = hierarchy->parent[j]; while(parent >= 0){ truth[parent] = 1; parent = hierarchy->parent[parent]; } } } int i; int count = 0; for(j = 0; j < hierarchy->groups; ++j){ //printf("%d\n", 
count); int mask = 1; for(i = 0; i < hierarchy->group_size[j]; ++i){ if(truth[count + i]){ mask = 0; break; } } if (mask) { for(i = 0; i < hierarchy->group_size[j]; ++i){ truth[count + i] = SECRET_NUM; } } count += hierarchy->group_size[j]; } } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy, float label_smooth_eps) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth_smooth(paths[i], labels, k, y.vals[i], label_smooth_eps); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "imgs", "labels", label); find_replace(label, "_iconl.jpeg", ".txt", label); FILE *file = fopen(label, "r"); if(!file){ find_replace(label, "labels", "labels2", label); file = fopen(label, "r"); if(!file) continue; } ++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } printf("%d/%d\n", count, n); return y; } char **get_labels_custom(char *filename, int *size) { list *plist = get_paths(filename); if(size) *size = plist->size; char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } char **get_labels(char *filename) { return get_labels_custom(filename, NULL); } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; int k = size*size*(5+classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); int oh = orig.h; int ow = orig.w; 
int dw = (ow*jitter); int dh = (oh*jitter); int pleft = rand_uniform(-dw, dw); int pright = rand_uniform(-dw, dw); int ptop = rand_uniform(-dh, dh); int pbot = rand_uniform(-dh, dh); int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; int flip = random_gen()%2; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft/ow)/sx; float dy = ((float)ptop /oh)/sy; image sized = resize_image(cropped, w, h); if(flip) flip_image(sized); random_distort_image(sized, hue, saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)xcalloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] 
< .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                // ambiguous pair: mark both slots with the sentinel
                d.y.vals[i][2*j] = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}

// Pick one random image and build a single jittered/cropped/flipped training
// example with SWAG-style truth (up to 30 boxes of 4+classes values each).
// Returns a deep data record; the caller frees it with free_data().
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = random_gen()%n;
    char *random_path = paths[index];

    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;

    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*30;
    d.y = make_matrix(1, k);

    // random jitter in pixels on each side
    int dw = w*jitter;
    int dh = h*jitter;

    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);

    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;

    int flip = random_gen()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    // truth-space offsets corresponding to the crop
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

// Append the boxes from old_truth after the boxes already present in
// new_truth (a zero x coordinate terminates each list). Used for mixup:
// the blended image keeps the union of both truth lists, up to `boxes`.
void blend_truth(float *new_truth, int boxes, float *old_truth)
{
    const int t_size = 4 + 1;   // x, y, w, h, class-id per box
    int count_new_truth = 0;
    int t;
    // count boxes already filled in new_truth
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    // copy old boxes into the remaining slots
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + t*t_size;
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        new_truth_ptr[0] = old_truth_ptr[0];
        new_truth_ptr[1] = old_truth_ptr[1];
        new_truth_ptr[2] = old_truth_ptr[2];
        new_truth_ptr[3] = old_truth_ptr[3];
        new_truth_ptr[4] = old_truth_ptr[4];
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

// Merge the truth of one mosaic quadrant (selected by i_mixup, 0..3) into
// new_truth. Box coordinates from old_truth are translated into mosaic
// coordinates, clipped to the image, and appended after the boxes already in
// new_truth; boxes that fall outside the image after clipping are dropped.
// cut_x/cut_y is the mosaic split point; the *_shift values are the per-side
// crop shifts of the source quadrant.
void blend_truth_mosaic(float *new_truth, int boxes, float *old_truth, int w, int h, float cut_x, float cut_y, int i_mixup, int left_shift, int right_shift, int top_shift, int bot_shift)
{
    const int t_size = 4 + 1;   // x, y, w, h, class-id per box
    int count_new_truth = 0;
    int t;
    // count boxes already present (zero x terminates the list)
    for (t = 0; t < boxes; ++t) {
        float x = new_truth[t*(4 + 1)];
        if (!x) break;
        count_new_truth++;
    }
    int new_t = count_new_truth;   // write cursor: only advanced for boxes we keep
    for (t = count_new_truth; t < boxes; ++t) {
        float *new_truth_ptr = new_truth + new_t*t_size;
        new_truth_ptr[0] = 0;   // pre-terminate in case this box is rejected
        float *old_truth_ptr = old_truth + (t - count_new_truth)*t_size;
        float x = old_truth_ptr[0];
        if (!x) break;

        float xb = old_truth_ptr[0];
        float yb = old_truth_ptr[1];
        float wb = old_truth_ptr[2];
        float hb = old_truth_ptr[3];

        // shift 4 images
        if (i_mixup == 0) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 1) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb - (float)(h - cut_y - bot_shift) / h;
        }
        if (i_mixup == 2) {
            xb = xb - (float)(w - cut_x - right_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }
        if (i_mixup == 3) {
            xb = xb + (float)(cut_x - left_shift) / w;
            yb = yb + (float)(cut_y - top_shift) / h;
        }

        // pixel-space box edges after the shift
        int left  = (xb - wb / 2)*w;
        int right = (xb + wb / 2)*w;
        int top   = (yb - hb / 2)*h;
        int bot   = (yb + hb / 2)*h;

        /*
        { // fix out of Mosaic-bound
            float left_bound = 0, right_bound = 0, top_bound = 0, bot_bound = 0;
            if (i_mixup == 0) { left_bound = 0; right_bound = cut_x; top_bound = 0; bot_bound = cut_y; }
            if (i_mixup == 1) { left_bound = cut_x; right_bound = w; top_bound = 0; bot_bound = cut_y; }
            if (i_mixup == 2) { left_bound = 0; right_bound = cut_x; top_bound = cut_y; bot_bound = h; }
            if (i_mixup == 3) { left_bound = cut_x; right_bound = w; top_bound = cut_y; bot_bound = h; }

            if (left < left_bound) {
                //printf(" i_mixup = %d, left = %d, left_bound = %f \n", i_mixup, left, left_bound);
                left = left_bound;
            }
            if (right > right_bound) {
                //printf(" i_mixup = %d, right = %d, right_bound = %f \n", i_mixup, right, right_bound);
                right = right_bound;
            }
            if (top < top_bound) top = top_bound;
            if (bot > bot_bound) bot = bot_bound;

            xb = ((float)(right + left) / 2) / w;
            wb = ((float)(right - left)) / w;
            yb = ((float)(bot + top) / 2) / h;
            hb = ((float)(bot - top)) / h;
        }
        */

        { // fix out of bound
            if (left < 0) {
                float diff = (float)left / w;
                xb = xb - diff / 2;
                wb = wb + diff;
            }
            if (right > w) {
                float diff = (float)(right - w) / w;
                xb = xb - diff / 2;
                wb = wb - diff;
            }
            if (top < 0) {
                float diff = (float)top / h;
                yb = yb - diff / 2;
                hb = hb + diff;
            }
            if (bot > h) {
                float diff = (float)(bot - h) / h;
                yb = yb - diff / 2;
                hb = hb - diff;
            }

            // recompute the edges after clipping
            left  = (xb - wb / 2)*w;
            right = (xb + wb / 2)*w;
            top   = (yb - hb / 2)*h;
            bot   = (yb + hb / 2)*h;
        }

        // leave only within the image
        if(left >= 0 && right <= w && top >= 0 && bot <= h &&
            wb > 0 && wb < 1 && hb > 0 && hb < 1 &&
            xb > 0 && xb < 1 && yb > 0 && yb < 1)
        {
            new_truth_ptr[0] = xb;
            new_truth_ptr[1] = yb;
            new_truth_ptr[2] = wb;
            new_truth_ptr[3] = hb;
            new_truth_ptr[4] = old_truth_ptr[4];
            new_t++;
        }
    }
    //printf("\n was %d bboxes, now %d bboxes \n", count_new_truth, t);
}

#ifdef OPENCV

#include "http_stream.h"

data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int use_gaussian_noise, int use_blur, int use_mixup,
    float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs)
{
    const int random_index = random_gen();
    c = c ?
c : 3; if (use_mixup == 2 || use_mixup == 4) { printf("\n cutmix=1 - isn't supported for Detector (use cutmix=1 only for Classifier) \n"); if (check_mistakes) getchar(); if(use_mixup == 2) use_mixup = 0; else use_mixup = 3; } if (use_mixup == 3 && letter_box) { printf("\n Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters \n"); if (check_mistakes) getchar(); exit(0); } if (random_gen() % 2 == 0) use_mixup = 0; int i; int *cut_x = NULL, *cut_y = NULL; if (use_mixup == 3) { cut_x = (int*)calloc(n, sizeof(int)); cut_y = (int*)calloc(n, sizeof(int)); const float min_offset = 0.2; // 20% for (i = 0; i < n; ++i) { cut_x[i] = rand_int(w*min_offset, w*(1 - min_offset)); cut_y[i] = rand_int(h*min_offset, h*(1 - min_offset)); } } data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale = 0; float resize_r1 = 0, resize_r2 = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0, blur = 0; int augmentation_calculated = 0, gaussian_noise = 0; d.y = make_matrix(n, 5*boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= use_mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; // recalculate augmentation for the 2nd sequence if(track==1) char **random_paths; if (track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(5 * boxes, sizeof(float)); const char *filename = random_paths[i]; int flag = (c >= 3); mat_cv *src; src = load_image_mat_cv(filename, flag); if (src == NULL) { printf("\n Error in load_data_detection() - OpenCV \n"); fflush(stdout); if (check_mistakes) { getchar(); } continue; } int oh = get_height_mat(src); int ow = get_width_mat(src); int dw = (ow*jitter); int dh = (oh*jitter); float resize_down = resize, resize_up = resize; if (resize_down > 1.0) resize_down = 1 / resize_down; int min_rdw = 
ow*(1 - (1 / resize_down)) / 2; // < 0 int min_rdh = oh*(1 - (1 / resize_down)) / 2; // < 0 if (resize_up < 1.0) resize_up = 1 / resize_up; int max_rdw = ow*(1 - (1 / resize_up)) / 2; // > 0 int max_rdh = oh*(1 - (1 / resize_up)) / 2; // > 0 //printf(" down = %f, up = %f \n", (1 - (1 / resize_down)) / 2, (1 - (1 / resize_up)) / 2); if (!augmentation_calculated || !track) { augmentation_calculated = 1; resize_r1 = random_float(); resize_r2 = random_float(); r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); flip = use_flip ? random_gen() % 2 : 0; if (use_blur) { int tmp_blur = rand_int(0, 2); // 0 - disable, 1 - blur background, 2 - blur the whole image if (tmp_blur == 0) blur = 0; else if (tmp_blur == 1) blur = 1; else blur = use_blur; } if (use_gaussian_noise && rand_int(0, 1) == 1) gaussian_noise = use_gaussian_noise; else gaussian_noise = 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); if (resize < 1) { // downsize only pleft += rand_precalc_random(min_rdw, 0, resize_r1); pright += rand_precalc_random(min_rdw, 0, resize_r2); ptop += rand_precalc_random(min_rdh, 0, resize_r1); pbot += rand_precalc_random(min_rdh, 0, resize_r2); } else { pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1); pright += rand_precalc_random(min_rdw, max_rdw, resize_r2); ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1); pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2); } //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); //float scale = rand_precalc_random(.25, 2, r_scale); // unused currently if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = 
img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh)/2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow)/2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } //printf("\n pleft = %d, pright = %d, ptop = %d, pbot = %d, ow = %d, oh = %d \n", pleft, pright, ptop, pbot, ow, oh); } /* // move each 2nd image to the corner - so that most of it was visible if (use_mixup == 3 && random_gen() % 2 == 0) { if (flip) { if (i_mixup == 0) pleft += pright, pright = 0, pbot += ptop, ptop = 0; if (i_mixup == 1) pright += pleft, pleft = 0, pbot += ptop, ptop = 0; if (i_mixup == 2) pleft += pright, pright = 0, ptop += pbot, pbot = 0; if (i_mixup == 3) pright += pleft, pleft = 0, ptop += pbot, pbot = 0; } else { if (i_mixup == 0) pright += pleft, pleft = 0, pbot += ptop, ptop = 0; if (i_mixup == 1) pleft += pright, pright = 0, pbot += ptop, ptop = 0; if (i_mixup == 2) pright += pleft, pleft = 0, ptop += pbot, pbot = 0; if (i_mixup == 3) pleft += pright, pright = 0, ptop += pbot, pbot = 0; } } */ int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; int min_w_h = fill_truth_detection(filename, boxes, truth, classes, flip, dx, dy, 1. / sx, 1. 
/ sy, w, h); if ((min_w_h / 8) < blur && blur > 1) blur = min_w_h / 8; // disable blur if one of the objects is too small image ai = image_data_augmentation(src, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur, boxes, truth); if (use_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); } else if (use_mixup == 1) { if (i_mixup == 0) { d.X.vals[i] = ai.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); } else if (i_mixup == 1) { image old_img = make_empty_image(w, h, c); old_img.data = d.X.vals[i]; //show_image(ai, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images_cv(ai, 0.5, old_img, 0.5); blend_truth(d.y.vals[i], boxes, truth); free_image(old_img); d.X.vals[i] = ai.data; } } else if (use_mixup == 3) { if (i_mixup == 0) { image tmp_img = make_image(w, h, c); d.X.vals[i] = tmp_img.data; } if (flip) { int tmp = pleft; pleft = pright; pright = tmp; } const int left_shift = min_val_cmp(cut_x[i], max_val_cmp(0, (-pleft*w / ow))); const int top_shift = min_val_cmp(cut_y[i], max_val_cmp(0, (-ptop*h / oh))); const int right_shift = min_val_cmp((w - cut_x[i]), max_val_cmp(0, (-pright*w / ow))); const int bot_shift = min_val_cmp(h - cut_y[i], max_val_cmp(0, (-pbot*h / oh))); int k, x, y; for (k = 0; k < c; ++k) { for (y = 0; y < h; ++y) { int j = y*w + k*w*h; if (i_mixup == 0 && y < cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 1 && y < cut_y[i]) { int j_src = left_shift + (y + h - cut_y[i] - bot_shift)*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w-cut_x[i]) * sizeof(float)); } if (i_mixup == 2 && y >= cut_y[i]) { int j_src = (w - cut_x[i] - right_shift) + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + 0], &ai.data[j_src], cut_x[i] * sizeof(float)); } if (i_mixup == 3 && y >= cut_y[i]) { int 
j_src = left_shift + (top_shift + y - cut_y[i])*w + k*w*h; memcpy(&d.X.vals[i][j + cut_x[i]], &ai.data[j_src], (w - cut_x[i]) * sizeof(float)); } } } blend_truth_mosaic(d.y.vals[i], boxes, truth, w, h, cut_x[i], cut_y[i], i_mixup, left_shift, right_shift, top_shift, bot_shift); free_image(ai); ai.data = d.X.vals[i]; } if (show_imgs && i_mixup == use_mixup) // delete i_mixup { image tmp_ai = copy_image(ai); char buff[1000]; //sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); sprintf(buff, "aug_%d_%d_%d", random_index, i, random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*ai.w; int right = (b.x + b.w / 2.)*ai.w; int top = (b.y - b.h / 2.)*ai.h; int bot = (b.y + b.h / 2.)*ai.h; draw_box_width(tmp_ai, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(tmp_ai, buff); if (show_imgs == 1) { //char buff_src[1000]; //sprintf(buff_src, "src_%d_%d_%s_%d", random_index, i, basecfg((char*)filename), random_gen()); //show_image_mat(src, buff_src); show_image(tmp_ai, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Click on window and press ESC button \n"); free_image(tmp_ai); } release_mat(&src); free(truth); } if (random_paths) free(random_paths); } return d; } #else // OPENCV void blend_images(image new_img, float alpha, image old_img, float beta) { int data_size = new_img.w * new_img.h * new_img.c; int i; #pragma omp parallel for for (i = 0; i < data_size; ++i) new_img.data[i] = new_img.data[i] * alpha + old_img.data[i] * beta; } data load_data_detection(int n, char **paths, int m, int w, int h, int c, int boxes, int classes, int use_flip, int gaussian_noise, int use_blur, int use_mixup, float jitter, float resize, float hue, float saturation, float exposure, int mini_batch, int track, int augment_speed, int letter_box, int show_imgs) { const int random_index = random_gen(); c = c ? c : 3; char **random_paths; char **mixup_random_paths = NULL; if(track) random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else random_paths = get_random_paths(paths, n, m); //assert(use_mixup < 2); if (use_mixup == 2) { printf("\n cutmix=1 - isn't supported for Detector \n"); exit(0); } if (use_mixup == 3 || use_mixup == 4) { printf("\n mosaic=1 - compile Darknet with OpenCV for using mosaic=1 \n"); exit(0); } int mixup = use_mixup ? 
random_gen() % 2 : 0; //printf("\n mixup = %d \n", mixup); if (mixup) { if (track) mixup_random_paths = get_sequential_paths(paths, n, m, mini_batch, augment_speed); else mixup_random_paths = get_random_paths(paths, n, m); } int i; data d = { 0 }; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)xcalloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*c; float r1 = 0, r2 = 0, r3 = 0, r4 = 0, r_scale; float resize_r1 = 0, resize_r2 = 0; float dhue = 0, dsat = 0, dexp = 0, flip = 0; int augmentation_calculated = 0; d.y = make_matrix(n, 5 * boxes); int i_mixup = 0; for (i_mixup = 0; i_mixup <= mixup; i_mixup++) { if (i_mixup) augmentation_calculated = 0; for (i = 0; i < n; ++i) { float *truth = (float*)xcalloc(5 * boxes, sizeof(float)); char *filename = (i_mixup) ? mixup_random_paths[i] : random_paths[i]; image orig = load_image(filename, 0, 0, c); int oh = orig.h; int ow = orig.w; int dw = (ow*jitter); int dh = (oh*jitter); float resize_down = resize, resize_up = resize; if (resize_down > 1.0) resize_down = 1 / resize_down; int min_rdw = ow*(1 - (1 / resize_down)) / 2; int min_rdh = oh*(1 - (1 / resize_down)) / 2; if (resize_up < 1.0) resize_up = 1 / resize_up; int max_rdw = ow*(1 - (1 / resize_up)) / 2; int max_rdh = oh*(1 - (1 / resize_up)) / 2; if (!augmentation_calculated || !track) { augmentation_calculated = 1; resize_r1 = random_float(); resize_r2 = random_float(); r1 = random_float(); r2 = random_float(); r3 = random_float(); r4 = random_float(); r_scale = random_float(); dhue = rand_uniform_strong(-hue, hue); dsat = rand_scale(saturation); dexp = rand_scale(exposure); flip = use_flip ? 
random_gen() % 2 : 0; } int pleft = rand_precalc_random(-dw, dw, r1); int pright = rand_precalc_random(-dw, dw, r2); int ptop = rand_precalc_random(-dh, dh, r3); int pbot = rand_precalc_random(-dh, dh, r4); if (resize < 1) { // downsize only pleft += rand_precalc_random(min_rdw, 0, resize_r1); pright += rand_precalc_random(min_rdw, 0, resize_r2); ptop += rand_precalc_random(min_rdh, 0, resize_r1); pbot += rand_precalc_random(min_rdh, 0, resize_r2); } else { pleft += rand_precalc_random(min_rdw, max_rdw, resize_r1); pright += rand_precalc_random(min_rdw, max_rdw, resize_r2); ptop += rand_precalc_random(min_rdh, max_rdh, resize_r1); pbot += rand_precalc_random(min_rdh, max_rdh, resize_r2); } if (letter_box) { float img_ar = (float)ow / (float)oh; float net_ar = (float)w / (float)h; float result_ar = img_ar / net_ar; //printf(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar); if (result_ar > 1) // sheight - should be increased { float oh_tmp = ow / net_ar; float delta_h = (oh_tmp - oh) / 2; ptop = ptop - delta_h; pbot = pbot - delta_h; //printf(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot); } else // swidth - should be increased { float ow_tmp = oh * net_ar; float delta_w = (ow_tmp - ow) / 2; pleft = pleft - delta_w; pright = pright - delta_w; //printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright); } } int swidth = ow - pleft - pright; int sheight = oh - ptop - pbot; float sx = (float)swidth / ow; float sy = (float)sheight / oh; image cropped = crop_image(orig, pleft, ptop, swidth, sheight); float dx = ((float)pleft / ow) / sx; float dy = ((float)ptop / oh) / sy; image sized = resize_image(cropped, w, h); if (flip) flip_image(sized); distort_image(sized, dhue, dsat, dexp); //random_distort_image(sized, hue, saturation, exposure); fill_truth_detection(filename, 
boxes, truth, classes, flip, dx, dy, 1. / sx, 1. / sy, w, h); if (i_mixup) { image old_img = sized; old_img.data = d.X.vals[i]; //show_image(sized, "new"); //show_image(old_img, "old"); //wait_until_press_key_cv(); blend_images(sized, 0.5, old_img, 0.5); blend_truth(truth, boxes, d.y.vals[i]); free_image(old_img); } d.X.vals[i] = sized.data; memcpy(d.y.vals[i], truth, 5 * boxes * sizeof(float)); if (show_imgs)// && i_mixup) { char buff[1000]; sprintf(buff, "aug_%d_%d_%s_%d", random_index, i, basecfg(filename), random_gen()); int t; for (t = 0; t < boxes; ++t) { box b = float_to_box_stride(d.y.vals[i] + t*(4 + 1), 1); if (!b.x) break; int left = (b.x - b.w / 2.)*sized.w; int right = (b.x + b.w / 2.)*sized.w; int top = (b.y - b.h / 2.)*sized.h; int bot = (b.y + b.h / 2.)*sized.h; draw_box_width(sized, left, top, right, bot, 1, 150, 100, 50); // 3 channels RGB } save_image(sized, buff); if (show_imgs == 1) { show_image(sized, buff); wait_until_press_key_cv(); } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. 
Press Enter: \n"); //getchar(); } free_image(orig); free_image(cropped); free(truth); } } free(random_paths); if (mixup_random_paths) free(mixup_random_paths); return d; } #endif // OPENCV void *load_thread(void *ptr) { //srand(time(0)); //printf("Loading data: %d\n", random_gen()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.mixup, a.blur, a.show_imgs, a.label_smooth_eps, a.dontuse_opencv); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.c, a.num_boxes, a.classes, a.flip, a.gaussian_noise, a.blur, a.mixup, a.jitter, a.resize, a.hue, a.saturation, a.exposure, a.mini_batch, a.track, a.augment_speed, a.letter_box, a.show_imgs); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = resize_image(*(a.im), a.w, a.h); }else if (a.type == LETTERBOX_DATA) { *(a.im) = load_image(a.path, 0, 0, a.c); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, 
a.flip, a.min, a.max, a.w, a.h, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);
    return 0;
}

// Spawn a detached-style worker that runs load_thread(args) once.
// Takes a heap copy of args; load_thread frees it.
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}

// --- permanent loader-thread pool state -------------------------------------
// Created lazily on the first load_threads() call and torn down by
// free_load_threads(). flag_exit / run_load_data[] are accessed only through
// custom_atomic_load_int / custom_atomic_store_int; args_swap[] is guarded by
// mtx_load_data.
static const int thread_wait_ms = 5;                    // polling interval
static volatile int flag_exit;                          // 1 => workers shut down
static volatile int * run_load_data = NULL;             // per-worker "go" flags
static load_args * args_swap = NULL;                    // per-worker job slots
static pthread_t* threads = NULL;                       // pool handles

pthread_mutex_t mtx_load_data = PTHREAD_MUTEX_INITIALIZER;

// Worker loop for pool slot i (i arrives as a heap int that this thread frees).
// Waits until run_load_data[i] is raised, copies its job out of args_swap[i]
// under the mutex, runs load_thread on a heap copy (which load_thread frees),
// then lowers the flag to signal completion.
void *run_thread_loop(void *ptr)
{
    const int i = *(int *)ptr;

    while (!custom_atomic_load_int(&flag_exit)) {
        while (!custom_atomic_load_int(&run_load_data[i])) {
            if (custom_atomic_load_int(&flag_exit)) {
                free(ptr);
                return 0;
            }
            this_thread_sleep_for(thread_wait_ms);
        }

        pthread_mutex_lock(&mtx_load_data);
        load_args *args_local = (load_args *)xcalloc(1, sizeof(load_args));
        *args_local = args_swap[i];
        pthread_mutex_unlock(&mtx_load_data);

        load_thread(args_local);

        custom_atomic_store_int(&run_load_data[i], 0);  // job done
    }
    free(ptr);
    return 0;
}

// Split one load request across the permanent pool: each worker loads a
// near-equal slice into its own buffer, then the slices are concatenated into
// *out. The pool is created on first use with the current args.threads.
// NOTE(review): the pool arrays are sized by the FIRST call's args.threads;
// a later call with a larger args.threads would index past them — confirm
// callers keep args.threads constant.
void *load_threads(void *ptr)
{
    //srand(time(0));
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data* buffers = (data*)xcalloc(args.threads, sizeof(data));
    if (!threads) {
        // lazy one-time pool creation
        threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
        run_load_data = (volatile int *)xcalloc(args.threads, sizeof(int));
        args_swap = (load_args *)xcalloc(args.threads, sizeof(load_args));
        fprintf(stderr, " Create %d permanent cpu-threads \n", args.threads);

        for (i = 0; i < args.threads; ++i) {
            int* ptr = (int*)xcalloc(1, sizeof(int));
            *ptr = i;
            if (pthread_create(&threads[i], 0, run_thread_loop, ptr)) error("Thread creation failed");
        }
    }

    // hand each worker its slice [i*total/threads, (i+1)*total/threads)
    for (i = 0; i < args.threads; ++i) {
        args.d = buffers + i;
        args.n = (i + 1) * total / args.threads - i * total / args.threads;

        pthread_mutex_lock(&mtx_load_data);
        args_swap[i] = args;
        pthread_mutex_unlock(&mtx_load_data);

        custom_atomic_store_int(&run_load_data[i], 1);  // run thread
    }
    // spin-wait until every worker lowers its flag (acts as a join)
    for (i = 0; i < args.threads; ++i) {
        while (custom_atomic_load_int(&run_load_data[i])) this_thread_sleep_for(thread_wait_ms);
    }
    /*
    pthread_t* threads = (pthread_t*)xcalloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    */

    // deep-concatenate the slices, then free only the slice pointer arrays
    // (row storage is now owned by *out)
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for (i = 0; i < args.threads; ++i) {
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    //free(threads);
    return 0;
}

// Shut down the permanent pool: raise flag_exit, join every worker, release
// the pool arrays, and reset flag_exit so the pool can be recreated.
void free_load_threads(void *ptr)
{
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    int i;
    if (threads) {
        custom_atomic_store_int(&flag_exit, 1);
        for (i = 0; i < args.threads; ++i) {
            pthread_join(threads[i], 0);
        }
        free((void*)run_load_data);
        free(args_swap);
        free(threads);
        threads = NULL;
        custom_atomic_store_int(&flag_exit, 0);
    }
}

// Kick off an asynchronous multi-threaded load; caller joins the returned
// thread when the data is needed.
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args* ptr = (load_args*)xcalloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

// Paired input/target image loader: X from paths, y from the matching
// "-label.png" files (grayscale, out_w x out_h).
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow =
0; d.X = load_image_paths(paths, n, w, h); d.y = load_labels_paths(paths, n, labels, k, 0, 0); if(m) free(paths); return d; } /* data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, flip, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; } */ data load_data_super(char **paths, int n, int m, int w, int h, int scale) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; int i; d.X.rows = n; d.X.vals = (float**)xcalloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = (float**)xcalloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = random_gen()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure, int use_mixup, int use_blur, int show_imgs, float label_smooth_eps, int dontuse_opencv) { char **paths_stored = paths; if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d.y = load_labels_paths(paths, n, labels, k, hierarchy, label_smooth_eps); if (use_mixup && rand_int(0, 1)) { char **paths_mix = get_random_paths(paths_stored, n, m); data d2 = { 0 }; d2.shallow = 0; 
d2.X = load_image_augment_paths(paths_mix, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d2.y = load_labels_paths(paths_mix, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix); data d3 = { 0 }; d3.shallow = 0; data d4 = { 0 }; d4.shallow = 0; if (use_mixup >= 3) { char **paths_mix3 = get_random_paths(paths_stored, n, m); d3.X = load_image_augment_paths(paths_mix3, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d3.y = load_labels_paths(paths_mix3, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix3); char **paths_mix4 = get_random_paths(paths_stored, n, m); d4.X = load_image_augment_paths(paths_mix4, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, dontuse_opencv); d4.y = load_labels_paths(paths_mix4, n, labels, k, hierarchy, label_smooth_eps); free(paths_mix4); } // mix int i, j; for (i = 0; i < d2.X.rows; ++i) { int mixup = use_mixup; if (use_mixup == 4) mixup = rand_int(2, 3); // alternate CutMix and Mosaic // MixUp ----------------------------------- if (mixup == 1) { // mix images for (j = 0; j < d2.X.cols; ++j) { d.X.vals[i][j] = (d.X.vals[i][j] + d2.X.vals[i][j]) / 2.0f; } // mix labels for (j = 0; j < d2.y.cols; ++j) { d.y.vals[i][j] = (d.y.vals[i][j] + d2.y.vals[i][j]) / 2.0f; } } // CutMix ----------------------------------- else if (mixup == 2) { const float min = 0.3; // 0.3*0.3 = 9% const float max = 0.8; // 0.8*0.8 = 64% const int cut_w = rand_int(w*min, w*max); const int cut_h = rand_int(h*min, h*max); const int cut_x = rand_int(0, w - cut_w - 1); const int cut_y = rand_int(0, h - cut_h - 1); const int left = cut_x; const int right = cut_x + cut_w; const int top = cut_y; const int bot = cut_y + cut_h; assert(cut_x >= 0 && cut_x <= w); assert(cut_y >= 0 && cut_y <= h); assert(cut_w >= 0 && cut_w <= w); assert(cut_h >= 0 && cut_h <= h); assert(right >= 0 && right <= w); assert(bot >= 0 && bot <= h); assert(top <= bot); assert(left <= 
right); const float alpha = (float)(cut_w*cut_h) / (float)(w*h); const float beta = 1 - alpha; int c, x, y; for (c = 0; c < 3; ++c) { for (y = top; y < bot; ++y) { for (x = left; x < right; ++x) { int j = x + y*w + c*w*h; d.X.vals[i][j] = d2.X.vals[i][j]; } } } //printf("\n alpha = %f, beta = %f \n", alpha, beta); // mix labels for (j = 0; j < d.y.cols; ++j) { d.y.vals[i][j] = d.y.vals[i][j] * beta + d2.y.vals[i][j] * alpha; } } // Mosaic ----------------------------------- else if (mixup == 3) { const float min_offset = 0.2; // 20% const int cut_x = rand_int(w*min_offset, w*(1 - min_offset)); const int cut_y = rand_int(h*min_offset, h*(1 - min_offset)); float s1 = (float)(cut_x * cut_y) / (w*h); float s2 = (float)((w - cut_x) * cut_y) / (w*h); float s3 = (float)(cut_x * (h - cut_y)) / (w*h); float s4 = (float)((w - cut_x) * (h - cut_y)) / (w*h); int c, x, y; for (c = 0; c < 3; ++c) { for (y = 0; y < h; ++y) { for (x = 0; x < w; ++x) { int j = x + y*w + c*w*h; if (x < cut_x && y < cut_y) d.X.vals[i][j] = d.X.vals[i][j]; if (x >= cut_x && y < cut_y) d.X.vals[i][j] = d2.X.vals[i][j]; if (x < cut_x && y >= cut_y) d.X.vals[i][j] = d3.X.vals[i][j]; if (x >= cut_x && y >= cut_y) d.X.vals[i][j] = d4.X.vals[i][j]; } } } for (j = 0; j < d.y.cols; ++j) { const float max_s = 1;// max_val_cmp(s1, max_val_cmp(s2, max_val_cmp(s3, s4))); d.y.vals[i][j] = d.y.vals[i][j] * s1 / max_s + d2.y.vals[i][j] * s2 / max_s + d3.y.vals[i][j] * s3 / max_s + d4.y.vals[i][j] * s4 / max_s; } } } free_data(d2); if (use_mixup >= 3) { free_data(d3); free_data(d4); } } #ifdef OPENCV if (use_blur) { int i; for (i = 0; i < d.X.rows; ++i) { if (random_gen() % 2) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; int ksize = use_blur; if (use_blur == 1) ksize = 17; image blurred = blur_image(im, ksize); free_image(im); d.X.vals[i] = blurred.data; //if (i == 0) { // show_image(im, "Not blurred"); // show_image(blurred, "blurred"); // wait_until_press_key_cv(); //} } } } #endif // OPENCV if 
(show_imgs) { int i, j; for (i = 0; i < d.X.rows; ++i) { image im = make_empty_image(w, h, 3); im.data = d.X.vals[i]; char buff[1000]; sprintf(buff, "aug_%d_%s_%d", i, basecfg((char*)paths[i]), random_gen()); save_image(im, buff); char buff_string[1000]; sprintf(buff_string, "\n Classes: "); for (j = 0; j < d.y.cols; ++j) { if (d.y.vals[i][j] > 0) { char buff_tmp[100]; sprintf(buff_tmp, " %d (%f), ", j, d.y.vals[i][j]); strcat(buff_string, buff_tmp); } } printf("%s \n", buff_string); if (show_imgs == 1) { show_image(im, buff); wait_until_press_key_cv(); } } printf("\nYou use flag -show_imgs, so will be saved aug_...jpg images. Click on window and press ESC button \n"); } if (m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int use_flip, int min, int max, int w, int h, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = w; d.h = h; d.shallow = 0; d.X = load_image_augment_paths(paths, n, use_flip, min, max, w, h, angle, aspect, hue, saturation, exposure, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = (float**)xcalloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data newdata = concat_data(d[i], out); free_data(out); out = newdata; } return out; } data load_categorical_data_csv(char *filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = 
one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } //translate_data_rows(d, -128); scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = random_gen()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class_id = bytes[0]; y.vals[i+b*10000][class_id] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); //translate_data_rows(d, -128); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = random_gen()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; 
for(i = 0; i < d.X.rows; ++i){ translate_array(d.X.vals[i], d.X.cols, s); } } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = (float**)xcalloc(num, sizeof(float*)); r.y.vals = (float**)xcalloc(num, sizeof(float*)); int i; for(i = 0; i < num; ++i){ int index = random_gen()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data* split = (data*)xcalloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train ={0}; data test ={0}; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = (float**)xcalloc(train.X.rows, sizeof(float*)); test.X.vals = (float**)xcalloc(test.X.rows, sizeof(float*)); train.y.vals = (float**)xcalloc(train.y.rows, sizeof(float*)); test.y.vals = (float**)xcalloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = train; split[1] = test; return split; }
GB_unaryop__ainv_int16_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int16_bool
// op(A') function:  GB_tran__ainv_int16_bool

// C type:   int16_t
// A type:   bool
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (arithmetic negation)
#define GB_OP(z, x) \
    z = -x ;

// casting from the A type to the C type
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (cast (aij)): the full get/cast/apply sequence for one entry
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entry-wise over all anz entries, parallelized with a
// static OpenMP schedule (each entry is independent).
GrB_Info GB_unop__ainv_int16_bool
(
    int16_t *Cx,        // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which is specialized by the
// macros defined above (a textual-include template, standard in GraphBLAS).
GrB_Info GB_tran__ainv_int16_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
residualbased_simple_steady_scheme.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Michael Andre, https://github.com/msandre
//

#if !defined(KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME )
#define KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
#include "includes/cfd_variables.h"
#include "containers/array_1d.h"
#include "utilities/openmp_utils.h"
#include "utilities/coordinate_transformation_utilities.h"
#include "processes/process.h"

namespace Kratos {

///@name Kratos Classes
///@{

/// Steady-state scheme with separate relaxation factors for velocity and
/// pressure, slip-condition handling via a coordinate rotation tool, and an
/// optional turbulence model executed each nonlinear iteration.
template<class TSparseSpace, class TDenseSpace >
class ResidualBasedSimpleSteadyScheme : public Scheme<TSparseSpace, TDenseSpace> {
public:
  ///@name Type Definitions
  ///@{

  KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedSimpleSteadyScheme);

  typedef Scheme<TSparseSpace, TDenseSpace> BaseType;

  typedef typename BaseType::DofsArrayType DofsArrayType;

  typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

  typedef typename BaseType::TSystemVectorType TSystemVectorType;

  typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

  typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

  typedef Element::GeometryType GeometryType;

  ///@}
  ///@name Life Cycle
  ///@{

  /// Constructor without a turbulence model.
  /// The rotation tool works on DomainSize velocity dofs + 1 pressure dof
  /// per node, on nodes flagged SLIP.
  ResidualBasedSimpleSteadyScheme(double VelocityRelaxationFactor,
                                  double PressureRelaxationFactor,
                                  unsigned int DomainSize)
      : Scheme<TSparseSpace, TDenseSpace>(),
        mVelocityRelaxationFactor(VelocityRelaxationFactor),
        mPressureRelaxationFactor(PressureRelaxationFactor),
        mRotationTool(DomainSize,DomainSize+1,SLIP)
  {}

  /// Constructor with a turbulence model, executed in FinalizeNonLinIteration.
  ResidualBasedSimpleSteadyScheme(
      double VelocityRelaxationFactor,
      double PressureRelaxationFactor,
      unsigned int DomainSize,
      Process::Pointer pTurbulenceModel)
      : Scheme<TSparseSpace, TDenseSpace>(),
        mVelocityRelaxationFactor(VelocityRelaxationFactor),
        mPressureRelaxationFactor(PressureRelaxationFactor),
        mRotationTool(DomainSize,DomainSize+1,SLIP),
        mpTurbulenceModel(pTurbulenceModel)
  {}

  ~ResidualBasedSimpleSteadyScheme() override {}

  ///@}
  ///@name Operators
  ///@{

  double GetVelocityRelaxationFactor() const {
    return mVelocityRelaxationFactor;
  }

  void SetVelocityRelaxationFactor(double factor) {
    mVelocityRelaxationFactor = factor;
  }

  double GetPressureRelaxationFactor() const {
    return mPressureRelaxationFactor;
  }

  void SetPressureRelaxationFactor(double factor) {
    mPressureRelaxationFactor = factor;
  }

  /// Update dofs with the relaxed solution increment.
  /// Velocities are rotated into the slip frame, rDx is scaled in place by
  /// the velocity relaxation factor, dofs are updated, then velocities are
  /// rotated back. NOTE(review): the pressure relaxation factor is not
  /// applied here — presumably handled elsewhere (e.g. in the builder);
  /// confirm against the calling strategy.
  void Update(ModelPart& rModelPart,
              DofsArrayType& rDofSet,
              TSystemMatrixType& rA,
              TSystemVectorType& rDx,
              TSystemVectorType& rb) override
  {
    KRATOS_TRY;

    mRotationTool.RotateVelocities(rModelPart);

    TSparseSpace::InplaceMult(rDx, mVelocityRelaxationFactor);
    mpDofUpdater->UpdateDofs(rDofSet,rDx);

    mRotationTool.RecoverVelocities(rModelPart);

    KRATOS_CATCH("");
  }

  /// Assemble the element LHS/RHS, add the steady (velocity) contribution,
  /// then rotate and apply the slip condition on the local system.
  void CalculateSystemContributions(
      Element::Pointer rCurrentElement,
      LocalSystemMatrixType& LHS_Contribution,
      LocalSystemVectorType& RHS_Contribution,
      Element::EquationIdVectorType& EquationId,
      ProcessInfo& CurrentProcessInfo) override
  {
    KRATOS_TRY;

    rCurrentElement->InitializeNonLinearIteration(CurrentProcessInfo);
    rCurrentElement->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

    Matrix SteadyLHS;
    rCurrentElement->CalculateLocalVelocityContribution(SteadyLHS, RHS_Contribution, CurrentProcessInfo);
    rCurrentElement->EquationIdVector(EquationId, CurrentProcessInfo);

    // a zero-sized SteadyLHS signals "no extra contribution"
    if (SteadyLHS.size1() != 0)
      noalias(LHS_Contribution) += SteadyLHS;

    // apply slip condition
    mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());
    mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement->GetGeometry());

    KRATOS_CATCH("");
  }

  /// Condition counterpart of CalculateSystemContributions.
  void Condition_CalculateSystemContributions(
      Condition::Pointer rCurrentCondition,
      LocalSystemMatrixType& LHS_Contribution,
      LocalSystemVectorType& RHS_Contribution,
      Condition::EquationIdVectorType& EquationId,
      ProcessInfo& CurrentProcessInfo) override
  {
    KRATOS_TRY;

    rCurrentCondition->InitializeNonLinearIteration(CurrentProcessInfo);
    rCurrentCondition->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);

    Matrix SteadyLHS;
    rCurrentCondition->CalculateLocalVelocityContribution(SteadyLHS, RHS_Contribution, CurrentProcessInfo);
    rCurrentCondition->EquationIdVector(EquationId, CurrentProcessInfo);

    if (SteadyLHS.size1() != 0)
      noalias(LHS_Contribution) += SteadyLHS;

    // apply slip condition
    mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
    mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());

    KRATOS_CATCH("");
  }

  /// RHS-only variant: computes the full local system and discards the LHS.
  void Calculate_RHS_Contribution(
      Element::Pointer rCurrentElement,
      LocalSystemVectorType& rRHS_Contribution,
      Element::EquationIdVectorType& rEquationId,
      ProcessInfo& rCurrentProcessInfo) override
  {
    KRATOS_TRY;

    Matrix LHS_Contribution;
    CalculateSystemContributions(rCurrentElement,LHS_Contribution,
                                 rRHS_Contribution,rEquationId,rCurrentProcessInfo);

    KRATOS_CATCH("");
  }

  /// Condition counterpart of Calculate_RHS_Contribution.
  void Condition_Calculate_RHS_Contribution(
      Condition::Pointer rCurrentCondition,
      LocalSystemVectorType& rRHS_Contribution,
      Element::EquationIdVectorType& rEquationId,
      ProcessInfo& rCurrentProcessInfo) override
  {
    KRATOS_TRY;

    Matrix LHS_Contribution;
    Condition_CalculateSystemContributions(rCurrentCondition,LHS_Contribution,
                                           rRHS_Contribution,rEquationId,
                                           rCurrentProcessInfo);

    KRATOS_CATCH("");
  }

  /// Run the turbulence model (if any) and, when OSS_SWITCH is active,
  /// recompute the orthogonal-subscale projections: zero the nodal
  /// accumulators, let elements accumulate ADVPROJ/DIVPROJ/NODAL_AREA,
  /// assemble across MPI partitions, then divide by nodal area.
  void FinalizeNonLinIteration(ModelPart& rModelPart,
                               TSystemMatrixType& rA,
                               TSystemVectorType& rDx,
                               TSystemVectorType& rb) override
  {
    if (mpTurbulenceModel) // If not null
    {
      mpTurbulenceModel->Execute();
    }

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    //if orthogonal subscales are computed
    if (CurrentProcessInfo[OSS_SWITCH] == 1.0) {
      KRATOS_INFO_IF("ResidualBasedSimpleSteadyScheme", rModelPart.GetCommunicator().MyPID() == 0)
          << "Computing OSS projections" << std::endl;

      const int number_of_nodes = rModelPart.NumberOfNodes();

#pragma omp parallel for
      for (int i = 0; i < number_of_nodes; i++) {
        ModelPart::NodeIterator it_node = rModelPart.NodesBegin() + i;
        noalias(it_node->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);
        it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0;
        it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
      }

      const int number_of_elements = rModelPart.NumberOfElements();
      array_1d<double, 3 > output;

#pragma omp parallel for private(output)
      for (int i = 0; i < number_of_elements; i++) {
        ModelPart::ElementIterator it_elem = rModelPart.ElementsBegin() + i;
        it_elem->Calculate(ADVPROJ,output,CurrentProcessInfo);
      }

      rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
      rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
      rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);

#pragma omp parallel for
      for (int i = 0; i < number_of_nodes; i++) {
        ModelPart::NodeIterator it_node = rModelPart.NodesBegin() + i;
        // guard against division by zero for nodes with no contribution
        if (it_node->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
          it_node->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
        const double area_inverse = 1.0 / it_node->FastGetSolutionStepValue(NODAL_AREA);
        it_node->FastGetSolutionStepValue(ADVPROJ) *= area_inverse;
        it_node->FastGetSolutionStepValue(DIVPROJ) *= area_inverse;
      }
    }
  }

  /// Compute nodal reactions from the element residuals (minus the local
  /// RHS, skipping the pressure dof), assemble them across partitions, and
  /// delegate to the base class.
  void FinalizeSolutionStep(ModelPart& rModelPart,
                            TSystemMatrixType& rA,
                            TSystemVectorType& rDx,
                            TSystemVectorType& rb) override
  {
    LocalSystemVectorType RHS_Contribution;
    LocalSystemMatrixType LHS_Contribution;
    ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();

    for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin();
         itNode != rModelPart.NodesEnd(); ++itNode)
    {
      itNode->FastGetSolutionStepValue(REACTION_X,0) = 0.0;
      itNode->FastGetSolutionStepValue(REACTION_Y,0) = 0.0;
      itNode->FastGetSolutionStepValue(REACTION_Z,0) = 0.0;
    }

    for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin();
         itElem != rModelPart.Elements().ptr_end(); ++itElem)
    {
      (*itElem)->InitializeNonLinearIteration(rCurrentProcessInfo);
      (*itElem)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);
      Matrix SteadyLHS;
      (*itElem)->CalculateLocalVelocityContribution(SteadyLHS,RHS_Contribution,rCurrentProcessInfo);

      GeometryType& rGeom = (*itElem)->GetGeometry();
      unsigned int NumNodes = rGeom.PointsNumber();
      unsigned int Dimension = rGeom.WorkingSpaceDimension();
      unsigned int index = 0;

      // local RHS layout assumed: per node [vx, vy, (vz), p]
      for (unsigned int i = 0; i < NumNodes; i++)
      {
        rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++];
        rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++];
        if (Dimension == 3)
          rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++];
        index++; // skip pressure dof
      }
    }

    rModelPart.GetCommunicator().AssembleCurrentData(REACTION);
    Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
  }

  ///@}

protected:
  ///@name Protected Operators
  ///@{

  ///@}

private:
  ///@name Member Variables
  ///@{

  double mVelocityRelaxationFactor;                 // relaxation applied to rDx in Update
  double mPressureRelaxationFactor;                 // exposed via getter/setter only
  CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;
  Process::Pointer mpTurbulenceModel;               // optional; may be null
  typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();

  ///@}
};

///@}

} // namespace Kratos

#endif /* KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME defined */
Euclid_apply.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_Euclid.h"
/* #include "Euclid_dh.h" */
/* #include "Mat_dh.h" */
/* #include "Factor_dh.h" */
/* #include "Parser_dh.h" */
/* #include "TimeLog_dh.h" */
/* #include "SubdomainGraph_dh.h" */

static void scale_rhs_private(Euclid_dh ctx, HYPRE_Real *rhs);
static void permute_vec_n2o_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT);
static void permute_vec_o2n_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT);

#undef __FUNC__
#define __FUNC__ "Euclid_dhApply"
/* Apply the Euclid preconditioner: lhs = M^{-1} rhs.
   Pipeline: (optional) new->old permutation, (optional) row scaling,
   triangular solve (sequential / block-Jacobi / PILU), then the
   old->new permutation back.  The input rhs is never modified. */
void Euclid_dhApply(Euclid_dh ctx, HYPRE_Real *rhs, HYPRE_Real *lhs)
{
  START_FUNC_DH
  HYPRE_Real *rhs_, *lhs_;
  HYPRE_Real t1, t2;
  t1 = hypre_MPI_Wtime();

  /* default settings; for everything except PILU */
  ctx->from = 0;
  ctx->to = ctx->m;

  /* case 1: no preconditioning (identity: plain copy) */
  if (! strcmp(ctx->algo_ilu, "none") || ! strcmp(ctx->algo_par, "none")) {
    HYPRE_Int i, m = ctx->m;
    for (i=0; i<m; ++i) lhs[i] = rhs[i];
    goto END_OF_FUNCTION;
  }

  /*----------------------------------------------------------------
   * permute and scale rhs vector
   *----------------------------------------------------------------*/

  /* permute rhs vector; lhs is borrowed as scratch for the permuted rhs */
  if (ctx->sg != NULL) {
    /* hypre_printf("@@@@@@@@@@@@@@@@@ permute_vec_n2o_private\n"); */
    permute_vec_n2o_private(ctx, rhs, lhs); CHECK_V_ERROR;
    rhs_ = lhs;
    lhs_ = ctx->work2;
  } else {
    rhs_ = rhs;
    lhs_ = lhs;
  }

  /* scale rhs vector */
  if (ctx->isScaled) {
    /* hypre_printf("@@@@@@@@@@@@@@@@@ scale_rhs_private\n"); */
    scale_rhs_private(ctx, rhs_); CHECK_V_ERROR;
  }

  /* note: rhs_ is permuted, scaled; the input, "rhs" vector
     has not been disturbed.
   */

  /*----------------------------------------------------------------
   * big switch to choose the appropriate triangular solve
   *----------------------------------------------------------------*/

  /* sequential and mpi block jacobi cases */
  if (np_dh == 1 || ! strcmp(ctx->algo_par, "bj") ) {
    Factor_dhSolveSeq(rhs_, lhs_, ctx); CHECK_V_ERROR;
  }

  /* pilu case */
  else {
    Factor_dhSolve(rhs_, lhs_, ctx); CHECK_V_ERROR;
  }

  /*----------------------------------------------------------------
   * unpermute lhs vector
   * (note: don't need to unscale, because we were clever)
   *----------------------------------------------------------------*/
  if (ctx->sg != NULL) {
    permute_vec_o2n_private(ctx, lhs_, lhs); CHECK_V_ERROR;
  }

END_OF_FUNCTION: ;

  t2 = hypre_MPI_Wtime();
  /* collective timing for triangular solves */
  ctx->timing[TRI_SOLVE_T] += (t2 - t1);

  /* collective timing for setup+krylov+triSolves
     (intent is to time linear solve, but this is at best problematical!)
   */
  ctx->timing[TOTAL_SOLVE_TEMP_T] = t2 - ctx->timing[SOLVE_START_T];

  /* total triangular solve count */
  ctx->its += 1;
  ctx->itsTotal += 1;

  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "scale_rhs_private"
/* Multiply rhs element-wise by the row-scaling vector (if the matrix
   was scaled during setup; no-op otherwise). */
void scale_rhs_private(Euclid_dh ctx, HYPRE_Real *rhs)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  REAL_DH *scale = ctx->scale;

  /* if matrix was scaled, must scale the rhs */
  if (scale != NULL) {
    /* NOTE(review): "omp for" here has no enclosing "omp parallel" in this
       function — presumably the caller provides the parallel region, or the
       pragma degrades to serial; verify before changing. */
#ifdef USING_OPENMP_DH
#pragma omp for schedule(static)
#endif
    for (i=0; i<m; ++i) {
      rhs[i] *= scale[i];
    }
  }
  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "permute_vec_o2n_private"
/* Gather: xOUT[i] = xIN[o2n_col[i]] (old-to-new column ordering). */
void permute_vec_o2n_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  HYPRE_Int *o2n = ctx->sg->o2n_col;

  for (i=0; i<m; ++i) xOUT[i] = xIN[o2n[i]];
  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "permute_vec_n2o_private"
/* Gather: xOUT[i] = xIN[n2o_row[i]] (new-to-old row ordering). */
void permute_vec_n2o_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  HYPRE_Int *n2o = ctx->sg->n2o_row;

  for (i=0; i<m; ++i) xOUT[i] = xIN[n2o[i]];
  END_FUNC_DH
}
master.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * OpenMP demo: each thread sums its statically-scheduled share of a[0..n-1]
 * into a private accumulator, folds it atomically into the shared total,
 * and the master thread prints the result after a barrier.
 * Usage: program <iterations>  (clamped to at most 20)
 */
int main(int argc, char **argv)
{
    int n = 20;
    int a[20];
    int suma = 0;

    if (argc < 2) {
        fprintf(stderr, "\nFalta iteraciones\n");
        exit(-1);
    }

    n = atoi(argv[1]);
    if (n > 20) {
        n = 20;
    }

    for (int i = 0; i < n; i++) {
        a[i] = i;
    }

    #pragma omp parallel
    {
        /* declared inside the region, hence private to each thread */
        int sumalocal = 0;
        int tid = omp_get_thread_num();

        #pragma omp for schedule(static)
        for (int i = 0; i < n; i++) {
            sumalocal += a[i];
            printf(" thread %d suma de a[%d]=%d sumalocal=%d \n",
                   tid, i, a[i], sumalocal);
        }

        /* fold each thread's partial sum into the shared total */
        #pragma omp atomic
        suma += sumalocal;

        /* make sure every partial sum has landed before printing */
        #pragma omp barrier
        #pragma omp master
        printf("thread master=%d imprime suma=%d\n", tid, suma);
    }

    return 0;
}
divsufsort.c
/* * divsufsort.c for libdivsufsort * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #define HAVE_CONFIG_H #include "divsufsort_private.h" #ifdef _OPENMP # include <omp.h> #endif /*- Private Functions -*/ /* Sorts suffixes of type B*. */ static saidx_t sort_typeBstar(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n) { saidx_t *PAb, *ISAb, *buf; #ifdef _OPENMP saidx_t *curbuf; saidx_t l; #endif saidx_t i, j, k, t, m, bufsize; saint_t c0, c1; #ifdef _OPENMP saint_t d0, d1; int tmp; #endif /* Initialize bucket arrays. */ for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; } for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; } /* Count the number of occurrences of the first one or two characters of each type A, B and B* suffix. Moreover, store the beginning position of all type B* suffixes into the array SA. */ for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) { /* type A suffix. 
*/ do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1)); if(0 <= i) { /* type B* suffix. */ ++BUCKET_BSTAR(c0, c1); SA[--m] = i; /* type B suffix. */ for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { ++BUCKET_B(c0, c1); } } } m = n - m; /* note: A type B* suffix is lexicographically smaller than a type B suffix that begins with the same first two characters. */ /* Calculate the index of start/end point of each bucket. */ for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) { t = i + BUCKET_A(c0); BUCKET_A(c0) = i + j; /* start point */ i = t + BUCKET_B(c0, c0); for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) { j += BUCKET_BSTAR(c0, c1); BUCKET_BSTAR(c0, c1) = j; /* end point */ i += BUCKET_B(c0, c1); } } if(0 < m) { /* Sort the type B* suffixes by their first two characters. */ PAb = SA + n - m; ISAb = SA + m; for(i = m - 2; 0 <= i; --i) { t = PAb[i], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = i; } t = PAb[m - 1], c0 = T[t], c1 = T[t + 1]; SA[--BUCKET_BSTAR(c0, c1)] = m - 1; /* Sort the type B* substrings using sssort. 
*/ #ifdef _OPENMP tmp = omp_get_max_threads(); buf = SA + m, bufsize = (n - (2 * m)) / tmp; c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m; #pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp) { tmp = omp_get_thread_num(); curbuf = buf + tmp * bufsize; k = 0; for(;;) { #pragma omp critical(sssort_lock) { if(0 < (l = j)) { d0 = c0, d1 = c1; do { k = BUCKET_BSTAR(d0, d1); if(--d1 <= d0) { d1 = ALPHABET_SIZE - 1; if(--d0 < 0) { break; } } } while(((l - k) <= 1) && (0 < (l = k))); c0 = d0, c1 = d1, j = k; } } if(l == 0) { break; } sssort(T, PAb, SA + k, SA + l, curbuf, bufsize, 2, n, *(SA + k) == (m - 1)); } } #else buf = SA + m, bufsize = n - (2 * m); for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { i = BUCKET_BSTAR(c0, c1); if(1 < (j - i)) { sssort(T, PAb, SA + i, SA + j, buf, bufsize, 2, n, *(SA + i) == (m - 1)); } } } #endif /* Compute ranks of type B* substrings. */ for(i = m - 1; 0 <= i; --i) { if(0 <= SA[i]) { j = i; do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i])); SA[i + 1] = i - j; if(i <= 0) { break; } } j = i; do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0); ISAb[SA[i]] = j; } /* Construct the inverse suffix array of type B* suffixes using trsort. */ trsort(ISAb, SA, m, 1); /* Set the sorted order of tyoe B* suffixes. */ for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) { for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { } if(0 <= i) { t = i; for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { } SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t; } } /* Calculate the index of start/end point of each bucket. */ BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */ for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) { i = BUCKET_A(c0 + 1) - 1; for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) { t = i - BUCKET_B(c0, c1); BUCKET_B(c0, c1) = i; /* end point */ /* Move all type B* suffixes to the correct position. 
*/ for(i = t, j = BUCKET_BSTAR(c0, c1); j <= k; --i, --k) { SA[i] = SA[k]; } } BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */ BUCKET_B(c0, c0) = i; /* end point */ } } return m; } /* Constructs the suffix array by using the sorted order of type B* suffixes. */ static void construct_SA(const sauchar_t *T, saidx_t *SA, saidx_t *bucket_A, saidx_t *bucket_B, saidx_t n, saidx_t m) { saidx_t *i, *j, *k; saidx_t s; saint_t c0, c1, c2; if(0 < m) { /* Construct the sorted order of type B suffixes by using the sorted order of type B* suffixes. */ for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { /* Scan the suffix array from right to left. */ for(i = SA + BUCKET_BSTAR(c1, c1 + 1), j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; i <= j; --j) { if(0 < (s = *j)) { assert(T[s] == c1); assert(((s + 1) < n) && (T[s] <= T[s + 1])); assert(T[s - 1] <= T[s]); *j = ~s; c0 = T[--s]; if((0 < s) && (T[s - 1] > c0)) { s = ~s; } if(c0 != c2) { if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } k = SA + BUCKET_B(c2 = c0, c1); } assert(k < j); *k-- = s; } else { assert(((s == 0) && (T[s] == c1)) || (s < 0)); *j = ~s; } } } } /* Construct the suffix array by using the sorted order of type B suffixes. */ k = SA + BUCKET_A(c2 = T[n - 1]); *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1); /* Scan the suffix array from left to right. */ for(i = SA, j = SA + n; i < j; ++i) { if(0 < (s = *i)) { assert(T[s - 1] >= T[s]); c0 = T[--s]; if((s == 0) || (T[s - 1] < c0)) { s = ~s; } if(c0 != c2) { BUCKET_A(c2) = k - SA; k = SA + BUCKET_A(c2 = c0); } assert(i < k); *k++ = s; } else { assert(s < 0); *i = ~s; } } } /* Constructs the burrows-wheeler transformed string directly by using the sorted order of type B* suffixes. 
*/
/* Same induced-sorting scheme as construct_SA, but instead of producing the
   suffix array it writes BWT characters in place: a slot whose suffix has
   been consumed stores the complemented preceding character ~(saidx_t)c0.
   Returns the primary index, i.e. the final position of the entry for the
   suffix starting at text position 0 (the slot where s == 0). */
static
saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA,
              saidx_t *bucket_A, saidx_t *bucket_B,
              saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k, *orig;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using the sorted order
       of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          c0 = T[--s];
          *j = ~((saidx_t)c0);     /* emit BWT character for this slot */
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            /* switched to a new c0: flush/reload the B(c0, c1) bucket cursor */
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else if(s != 0) {
          *j = ~s;
#ifndef NDEBUG
        } else {
          assert(T[s] == c1);
#endif
        }
      }
    }
  }

  /* Construct the BWTed string by using the sorted order of type B
     suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      *i = c0;                     /* this slot's BWT character */
      if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else if(s != 0) {
      *i = ~s;
    } else {
      orig = i;                    /* suffix 0 landed here: primary index */
    }
  }

  return orig - SA;
}

/*---------------------------------------------------------------------------*/

/*- Function -*/

/* Computes the suffix array of T[0..n-1] into SA[0..n-1].
   Returns 0 on success, -1 on invalid arguments (NULL pointers or n < 0),
   -2 when the bucket tables cannot be allocated. n <= 2 is handled
   directly without allocation. */
saint_t
divsufsort(const sauchar_t *T, saidx_t *SA, saidx_t n) {
  saidx_t *bucket_A, *bucket_B;
  saidx_t m;
  saint_t err = 0;

  /* Check arguments. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  else if(n == 0) { return 0; }
  else if(n == 1) { SA[0] = 0; return 0; }
  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }

  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Suffixsort. */
  if((bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n);
    construct_SA(T, SA, bucket_A, bucket_B, n, m);
  } else {
    err = -2;
  }

  /* free(NULL) is a no-op, so unconditional frees are safe here. */
  free(bucket_B);
  free(bucket_A);

  return err;
}

/* Computes the Burrows-Wheeler transform of T[0..n-1] into U[0..n-1].
   A is optional workspace of at least n+1 elements; when A == NULL a
   temporary array is allocated (and freed) internally.
   Returns the primary index + 1 on success, n for n <= 1, -1 on invalid
   arguments, -2 on allocation failure. */
saidx_t
divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
  saidx_t *B;
  saidx_t *bucket_A, *bucket_B;
  saidx_t m, pidx, i;

  /* Check arguments. */
  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }

  if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Burrows-Wheeler Transform. */
  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
    pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);

    /* Copy to output string: U[0] is the last character of T, then the
       BWT characters from B, skipping the primary-index slot. */
    U[0] = T[n - 1];
    for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
    for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
    pidx += 1;
  } else {
    pidx = -2;
  }

  free(bucket_B);
  free(bucket_A);
  if(A == NULL) { free(B); }

  return pidx;
}

/* Returns the library version string. */
const char *
divsufsort_version(void) {
  return PROJECT_VERSION_FULL;
}
CacheGlue.h
/*************************************************************************** * Copyright (C) 2009-2013 by Florian Goth * * fgoth@wthp095 * * * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * * * Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ***************************************************************************/ #ifndef CACHE_GLUE_H #define CACHE_GLUE_H #include <map> #include <utility> #include <stdexcept> #include <valarray> #include "generalPhysics.h" #include "toFPType.h" #include "MTL/MTL_Macros.h" /** This structure encapsulates two Vertices and provides comparison operators */ template <class Vertex> struct VertexPair { Vertex v1;///< the first vertex Vertex v2;///< the second vertex /** Constructor for the vertex-pair @param a the first vertex @param b the second vertex */ VertexPair(Vertex a, Vertex b) : v1(a), v2(b) {} VertexPair(const VertexPair& rhs) : v1(rhs.v1), v2(rhs.v2) {} /** A comparison operator that probes for identity @param arg the other vertexpair to compare against @return true if the vertices in each VertexPair are the identical, else false */ inline bool operator==(const VertexPair& arg) const; /** A smaller-than operator to compare two vertices lexicographically @param arg the other Vertex-Pair to compare against @return true if the 'string' (v1,v2) is lexicographcally smaller than the same string arg. Else return false */ inline bool operator<(const VertexPair& arg) const; }; template <class T> inline VertexPair<T> make_Vertex_pair(T a, T b) { return VertexPair<T>(a,b); } template <class Configuration, class GreensFunction, typename FPType> class Wick { public: typedef typename GreensFunction::FreeGreensFunctionReturnValueType RetType; typedef typename Configuration::value_type Vertex; typedef Configuration Config; typedef GreensFunction GF; /** This function forwards the measurement to the Matrix-Container. The Matrix-Container can better decide which optimizations to make. 
*/
    inline RetType measure(const Configuration& configuration, const typename GreensFunction::Vertex& creator, const typename GreensFunction::Vertex& destructor) const
    {
        // delegate to the configuration's templated measure()
        RetType retval = configuration.template measure<GreensFunction>(creator, destructor);
        return retval;
    }
};

template <class Vertex, class GFRetVal> class DryRun;//forward declare DryRun

/** Generic cache mapping VertexPairs to Greensfunction values.
Entries are registered via the (private) insert(), which is only reachable
through the friend class DryRun; measure() then refreshes every stored
value for a given configuration. */
template <class Vertex, class GFRetVal>
class Cache
{
public:
    typedef typename std::map<VertexPair<Vertex>, GFRetVal >::iterator iterator;
    typedef typename std::map<VertexPair<Vertex>, GFRetVal >::const_iterator const_iterator;
    typedef std::valarray<std::pair<const VertexPair<Vertex>, GFRetVal>* > LinearType;
    /** @return the number of cached VertexPair entries */
    inline std::size_t size() const throw()
    {
        return cache.size();
    }
    /** Look up the cached value for a VertexPair.
    Terminates the program if the pair was never registered via DryRun.
    @param vp the key to look up
    @return the cached Greensfunction value */
    inline GFRetVal get_key(const VertexPair<Vertex> vp) const MTL_PURE_FUNCTION
    {
        const_iterator it = cache.find(vp);
        if(unlikely(it == cache.end()))
        {
            std::cout<<"Vertices not found: "<<std::endl;
            std::cout<<vp.v1<<std::endl;
            std::cout<<vp.v2<<std::endl;
            std::cout<<"Aborting... Do something!!!!!"<<std::endl;
            exit(-1);
        }
        return it->second;
    }
    /** A function for obtaining a const_iterator to the beginning of the map
    @return a const_iterator to the beginning of the map
    */
    inline const_iterator begin() const
    {
        return cache.begin();
    }
    inline iterator begin()
    {
        return cache.begin();
    }
    inline iterator end()
    {
        return cache.end();
    }
    inline const_iterator end() const
    {
        return cache.end();
    }
    /** @return a valarray of pointers to every map entry, in map order */
    inline LinearType linear_array()
    {
        LinearType ret(this->size());
        int u = 0;
        for (typename std::map<VertexPair<Vertex>, GFRetVal>::iterator it = cache.begin(); it != cache.end(); ++it, ++u)
            ret[u] = &(*it);
        return ret;
    }
    /** Build the linearized pointer table used by the OpenMP measure() path.
    Must be called after all insert()s and before measure() when compiled
    with _OPENMP; a no-op otherwise. */
    inline void init()
    {
#ifdef _OPENMP
        items.resize(this->size());
        int u = 0;
        for (typename std::map<VertexPair<Vertex>, GFRetVal>::iterator it = cache.begin(); it != cache.end(); ++it, ++u)
            items[u] = &(*it);
#endif
    }
    /** Re-measure every cached entry for the given configuration.
    @param wick the measurement functor (see class Wick)
    @param conf the configuration to measure on */
    template <class T>
    inline void measure(const T& wick, const typename T::Config& conf)
    {
#ifdef _OPENMP
        // NOTE(review): 'int u' is compared against items.size() (std::size_t)
        // -- signed/unsigned comparison; also relies on init() having been called.
        #pragma omp parallel for
        for (int u = 0; u < items.size(); ++u)
        {
            const Vertex& v1(items[u]->first.v1);
            const Vertex& v2(items[u]->first.v2);
            items[u]->second = wick.measure(conf, v1, v2);
        }
#else
        for (typename std::map<VertexPair<Vertex>, GFRetVal >::iterator it = cache.begin(); it != cache.end(); ++it)
        {
            const Vertex& v1(it->first.v1);
            const Vertex& v2(it->first.v2);
            it->second = wick.measure(conf, v1, v2);
        }
#endif
    }
    /** Constructor; the argument is unused in the generic template
    (the Hubbard_Vertex specialization uses it as the chain length). */
    inline Cache(unsigned int) {}
private:
    friend class DryRun<Vertex, GFRetVal>;
    /** Register a VertexPair with an initial value of zero. */
    inline void insert(VertexPair<Vertex>& arg)
    {
        cache.insert(std::make_pair(arg, GFRetVal(0.0)));
    }
    std::map<VertexPair<Vertex>, GFRetVal> cache;///< the cache with all the requested greensfunction values
#ifdef _OPENMP
    LinearType items;///< some temporary space where we store the pointers into the map in a linear fashion
#endif
};

template <class GFRetVal, typename FPType> class Cache<Hubbard_Vertex<FPType>, GFRetVal>;

/** Input iterator over the per-site-pair maps of the Hubbard_Vertex
Cache specialization. */
template <class GFRetVal, typename FPType>
class Cache_Iterator : std::iterator<std::input_iterator_tag,
std::pair<VertexPair<Basic_Vertex<FPType> >, GFRetVal> >
{
public:
    inline Cache_Iterator& operator++()// equals ++it
    {
        ++it;
        // NOTE(review): no bounds check on idx -- incrementing past the last
        // non-empty map dereferences cache[idx] out of range; confirm callers
        // never advance beyond end().
        if (it == cache[idx].end())
        {
            ++idx;
            it = cache[idx].begin();
        }
        return *this;
    }
    inline bool operator==(const Cache_Iterator& rhs) const
    {
        return (cache == rhs.cache) && (idx == rhs.idx) && (it == rhs.it);
    }
    inline bool operator!=(const Cache_Iterator& rhs) const
    {
        return !this->operator==(rhs);
    }
    inline std::pair<VertexPair<Basic_Vertex<FPType> >, GFRetVal>& operator*()
    {
        return *it;
    }
private:
    unsigned int idx;
    typename std::map<VertexPair<Basic_Vertex<FPType> >, GFRetVal >::iterator it;
    Cache<Hubbard_Vertex<FPType>, GFRetVal>& cache;
    // NOTE(review): members are initialized in declaration order (idx, it,
    // cache), so 'it' is constructed from 'cache' BEFORE the reference is
    // bound -- undefined behavior. Declaring 'cache' before 'it' would fix
    // this. Also, Cache<Hubbard_Vertex,...> exposes no operator[] here --
    // presumably this iterator is never instantiated; verify before use.
    Cache_Iterator(Cache<Hubbard_Vertex<FPType>, GFRetVal>& arg) : idx(0), cache(arg), it( cache[idx].begin() ) {}
};

/** Specialization of Cache for Hubbard_Vertex keys: the cache is split into
a chain_len x chain_len array of maps, one per (site_i, site_j) pair, keyed
by the remaining (tau, spin) data of the two vertices. */
template <class GFRetVal, typename FPType>
class Cache<Hubbard_Vertex<FPType>, GFRetVal>
{
public:
    typedef Cache_Iterator<GFRetVal, FPType> iterator;
    typedef const Cache_Iterator<GFRetVal, FPType> const_iterator;
    /** @return total number of entries summed over all site-pair maps */
    inline std::size_t size() const throw() MTL_PURE_FUNCTION
    {
        std::size_t ret = 0;
        for (unsigned int k = 0; k < chain_len * chain_len; ++k)
            ret+= cache[k].size();
        return ret;
    }
    /** Look up a cached value: select the map via the two site indices,
    then search it with the (tau, spin) parts of the vertices.
    NOTE(review): unlike the generic template, a missing key is NOT
    diagnosed -- find() == end() would dereference an invalid iterator. */
    inline GFRetVal get_key(const VertexPair<Hubbard_Vertex<FPType> > vp) const
    {
        return access(vp.v1.site, vp.v2.site).find(make_Vertex_pair(Basic_Vertex<FPType>(vp.v1.tau, vp.v1.spin), Basic_Vertex<FPType>(vp.v2.tau, vp.v2.spin)))->second;
    }
    // NOTE(review): these return Cache_Iterator from a raw map pointer;
    // no matching conversion is visible in this header -- confirm these
    // members are ever instantiated.
    inline const_iterator begin() const
    {
        return cache.begin();
    }
    inline iterator begin()
    {
        return cache.begin();
    }
    inline iterator end()
    {
        return cache.end();
    }
    inline const_iterator end() const
    {
        return cache.end();
    }
    /** Constructor.
    @param len the chain length; allocates len*len empty maps */
    inline Cache(const unsigned int len) : cache(new std::map<VertexPair<Basic_Vertex<FPType> >, GFRetVal>[len * len]), chain_len(len) {}
    inline ~Cache()
    {
        delete [] cache;
    }
    /** A function to measure the greensfunction values that are stored in the cache
    @param wick The object that performs the measurement on a Configuration
    @param conf The configuration
    */
    template <class W>
    inline void measure(const W& wick, const typename W::Config& conf)
    {
        std::cout<<"Beginning measurement of Greensfunctions"<<std::endl;
        //somehow playing with the chunk size could probably help with the scheduling...
        #pragma omp parallel for schedule(dynamic)
        for (unsigned int k = 0; k < chain_len*chain_len; ++k)
        {
//	for (unsigned int j = 0; j < chain_len; ++j)
//	{
            // decode the flattened site-pair index
            const uint idx_v1 = k/chain_len;
            const uint idx_v2 = k%chain_len;
            std::map<VertexPair<Basic_Vertex<FPType> >, GFRetVal>& lm(access(idx_v1, idx_v2));
            for (typename std::map<VertexPair<Basic_Vertex<FPType> >, GFRetVal>::iterator it = lm.begin(); it != lm.end(); ++it)
            {
                // reconstruct full Hubbard vertices from site index + (tau, spin)
                const VertexPair<Basic_Vertex<FPType> >& vp(it->first);
                const Hubbard_Vertex<FPType> v1(idx_v1, vp.v1.tau, vp.v1.spin);
                const Hubbard_Vertex<FPType> v2(idx_v2, vp.v2.tau, vp.v2.spin);
                it->second = wick.measure(conf, v1, v2);
            }
//	}
        }
        std::cout<<"measurement of Greensfunctions done!"<<std::endl;
    }
    /** No linearization needed in this specialization; kept for interface
    compatibility with the generic Cache. */
    inline void init() { }
private:
    /** Register a VertexPair: route to the (site, site) map and key by
    the remaining (tau, spin) data, initial value zero. */
    inline void insert(VertexPair<Hubbard_Vertex<FPType> >& arg)
    {
        GFRetVal zero = 0;
        access(arg.v1.site, arg.v2.site).insert(std::make_pair(make_Vertex_pair(Basic_Vertex<FPType>(arg.v1.tau, arg.v1.spin), Basic_Vertex<FPType>(arg.v2.tau, arg.v2.spin)), zero));
    }
    friend class DryRun<Hubbard_Vertex<FPType>, GFRetVal>;
    std::map<VertexPair<Basic_Vertex<FPType> >, GFRetVal>* cache;///< the cache with all the requested greensfunction values
    unsigned int chain_len;///< the chain length; cache holds chain_len^2 maps
    /** Row-major accessor into the site-pair map array. */
    inline const std::map<VertexPair<Basic_Vertex<FPType> >, GFRetVal>& access(unsigned int i, unsigned int k) const
    {
        return cache[i*chain_len + k];
    }
    inline std::map<VertexPair<Basic_Vertex<FPType> >, GFRetVal>& access(unsigned int i, unsigned int k)
    {
        return cache[i*chain_len + k];
    }
};

/** We need to provide the Greensfunctionvalue-cache with the necessary data which Greensfunction values we want to evaluate at every Configuration.
For this, each observable provides a dryrun() method that takes a DryRun object which takes care of storing the Objects into the cache.
*/
template <class Vertex, class GFRetVal>
class DryRun
{
public:
    typedef typename SignToFPType<GFRetVal>::Ret FPType;
    /** Constructor of the DryRun Object
    @param arg a reference to the storage of the cache
    */
    inline DryRun(Cache<Vertex, GFRetVal>& arg) : cache(arg) {}
    /** This ()-operator takes the two Vertices at which to evaluate the Greensfunction and stores them in the cache
    @param v1 the first Vertex (the adjungated fermi-operator)
    @param v2 the second Vertex
    */
    inline void operator()(Vertex v1, Vertex v2);
    /** A function that maps the previously used doWickonSector to the Cache architecture
    the template parameter selects the Spin sector
    @param site_i the site of the first Vertex
    @param tau_i the time of the first vertex
    @param site_j the site of the second vertex
    @param tau_j the time of the second vertex
    */
    template<SPINS Spin>
    inline void onSector(const int site_i, const FPType tau_i, const int site_j, const FPType tau_j);
private:
    //declare certain things private to not allow copying this object
    DryRun();
    DryRun(const DryRun&);
    DryRun& operator=(const DryRun&);
    Cache<Vertex, GFRetVal>& cache;///< A reference to the structure for the cache
};

/** This class encapsulates the access from an observable to the cache. The Observables provide evaluate() methods that take this object, which will be used for evaluating the Greensfunctionvalues.
*/
template <class Vertex, class GFRetVal>
class DoWick
{
public:
    typedef typename SignToFPType<GFRetVal>::Ret FPType;
    /** The constructor of the DoWick object
    @param arg a reference to the object that has the cached values
    */
    inline DoWick(const Cache<Vertex, GFRetVal>& arg) : cache(arg) {}
    /** This ()-operator takes the two Vertices at which to evaluate the Greensfunction and stores them in the cache
    @param v1 the first Vertex (the adjungated fermi-operator)
    @param v2 the second Vertex
    */
    inline GFRetVal operator()(Vertex v1, Vertex v2) const;
    /** This function mimicks the behaviour of the previously used doWickonSector function. It returns the value of the Greensfunction for the given values
    @param site_i the site of the first Vertex
    @param tau_i the time of the first vertex
    @param site_j the site of the second vertex
    @param tau_j the time of the second vertex
    @return the value of the Greensfunction for the given values
    */
    template<SPINS Spin>
    inline GFRetVal onSector(const int site_i, const FPType tau_i, const int site_j, const FPType tau_j) const;
private:
    //declare certain things private to not allow copying this object
    DoWick();
    DoWick(const DoWick&);
    DoWick& operator=(const DoWick&);
    const Cache<Vertex, GFRetVal>& cache;///< read-only reference to the cached values
};

/** Helper that builds the proper VertexPair key for a given vertex type;
specialized below for Basic_Vertex (drops the site indices) and
Hubbard_Vertex (keeps them). The primary template is not defined. */
template <class Vertex, typename FPType>
struct DryRunHelper
{
    static inline VertexPair<Vertex> createPair(const int site_i, const FPType tau_i, const int site_j, const FPType tau_j, SPINS spin);
};

template <typename FPType>
struct DryRunHelper<Basic_Vertex<FPType>, FPType>
{
    /** Build a key from (tau, spin) only; the site arguments are ignored. */
    static inline VertexPair<Basic_Vertex<FPType> > createPair(const int site_i, const FPType tau_i, const int site_j, const FPType tau_j, SPINS spin)
    {
        return VertexPair<Basic_Vertex<FPType> >(Basic_Vertex<FPType>(tau_i, spin), Basic_Vertex<FPType>(tau_j, spin));
    }
};

template <typename FPType>
struct DryRunHelper<Hubbard_Vertex<FPType>, FPType>
{
    /** Build a key carrying site, tau and spin for both vertices. */
    static inline VertexPair<Hubbard_Vertex<FPType> > createPair(const int site_i, const FPType tau_i, const int site_j, const FPType tau_j, SPINS spin)
    {
        return VertexPair<Hubbard_Vertex<FPType> >(Hubbard_Vertex<FPType>(site_i, tau_i, spin), Hubbard_Vertex<FPType>(site_j, tau_j, spin));
    }
};

/* Out-of-class definitions of the member templates declared above. */

template< class Vertex, class GFRetVal>
template<SPINS Spin>
GFRetVal DoWick<Vertex, GFRetVal>::onSector(const int site_i, const FPType tau_i, const int site_j, const FPType tau_j) const
{
    // build the key for this vertex type and fetch the cached value
    return cache.get_key(DryRunHelper<Vertex, FPType>::createPair(site_i, tau_i, site_j, tau_j, Spin));
}

template< class Vertex, class GFRetVal>
template<SPINS Spin>
void DryRun<Vertex, GFRetVal>::onSector(const int site_i, const FPType tau_i, const int site_j, const FPType tau_j)
{
    // register the key so later measure() passes fill in its value
    VertexPair<Vertex> ce = DryRunHelper<Vertex, FPType>::createPair(site_i, tau_i, site_j, tau_j, Spin);
    this->cache.insert(ce);
}

template< class Vertex, class GFRetVal>
GFRetVal DoWick<Vertex, GFRetVal>::operator()(Vertex v1, Vertex v2) const
{
    VertexPair<Vertex> ce(v1, v2);
    return cache.get_key(ce);
}

template< class Vertex, class GFRetVal>
void DryRun<Vertex, GFRetVal>::operator()(Vertex v1, Vertex v2)
{
    VertexPair<Vertex> ce(v1, v2);
    this->cache.insert(ce);
}

/* Lexicographic strict-weak ordering on (v1, v2); required for use as a
   std::map key. */
template <class Vertex>
bool VertexPair<Vertex>::operator<(const VertexPair& arg) const
{
    if (!(v1 < arg.v1))
    {
        //greater-than-branch
        if (v1 == arg.v1)
        {
            return v2 < arg.v2;
        }
        return false;
    }
    return true;
}

template <class Vertex>
bool VertexPair<Vertex>::operator==(const VertexPair& arg) const
{
    return (arg.v1 == v1) && (arg.v2 == v2);
}
#endif
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; 
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII {
  unsigned &Depth;
  unsigned AddedLevels;

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), AddedLevels(0) {}

  // On destruction, undo every level this RAII object added.
  ~TemplateParameterDepthRAII() { Depth -= AddedLevels; }

  void operator++() {
    ++Depth;
    ++AddedLevels;
  }
  void addDepth(unsigned D) {
    Depth += D;
    AddedLevels += D;
  }
  void setAddedDepth(unsigned D) {
    Depth = Depth - AddedLevels + D;
    AddedLevels = D;
  }

  unsigned getDepth() const { return Depth; }
  unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};

/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;

/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;

void MaybeDestroyTemplateIds() {
  if (!TemplateIds.empty() &&
      (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
    DestroyTemplateIds();
}
void DestroyTemplateIds();

/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
  Parser &Self;

  DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
  ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};

/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;

/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,

    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token.
    NoSpaceBeforeLess = 0x1,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;

    // A location is "active" when the parser is at the same bracket nesting
    // depth as when the '<' was seen.
    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }

    bool isActiveOrNested(Parser &P) const {
      return isActive(P) || P.ParenCount > ParenCount ||
             P.BracketCount > BracketCount || P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (!Locs.empty() && Locs.back().isActive(P)) {
      if (Locs.back().Priority <= Prio) {
        Locs.back().TemplateName = TemplateName;
        Locs.back().LessLoc = LessLoc;
        Locs.back().Priority = Prio;
      }
    } else {
      Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                      P.BracketCount, P.BraceCount});
    }
  }

  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }

  /// Get the current enclosing expression that might have been intended to be
  /// a template name.
  Loc *getCurrent(Parser &P) {
    if (!Locs.empty() && Locs.back().isActive(P))
      return &Locs.back();
    return nullptr;
  }
};

AngleBracketTracker AngleBrackets;

IdentifierInfo *getSEHExceptKeyword();

/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;

/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;

/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;

/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
  /// This context permits declarations in language modes where declarations
  /// are not statements.
  AllowDeclarationsInC = 0x1,
  /// This context permits standalone OpenMP directives.
  AllowStandaloneOpenMPDirectives = 0x2,
  /// This context is at the top level of a GNU statement expression.
  InStmtExpr = 0x4,

  /// The context of a regular substatement.
  SubStmt = 0,
  /// The context of a compound-statement.
  Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

  LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};

/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);

public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;

const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }

const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}

Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;

typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

typedef Sema::FullExprArg FullExprArg;

// Parsing methods.

/// Initialize - Warm up the parser.
///
void Initialize();

/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);

/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
  DeclGroupPtrTy Result;
  return ParseTopLevelDecl(Result);
}

/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current token only if it has the expected kind; returns
/// true when the token was consumed.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (Tok.isNot(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}

/// Like TryConsumeToken, but also reports the consumed token's location.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (!TryConsumeToken(Expected))
    return false;
  Loc = PrevTokLocation;
  return true;
}

/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}

SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}

/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}

private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//

/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); }

/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); }
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}

/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='.
bool isTokenEqualOrEqualTypo();

/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  PP.EnterToken(Consumed, /*IsReinject*/ true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/ true);
}

SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}

/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    AngleBrackets.clear(*this);
    --ParenCount; // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    AngleBrackets.clear(*this);
    --BracketCount; // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    AngleBrackets.clear(*this);
    --BraceCount; // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();

/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}

/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
  tok::TokenKind Kind = Tok.getKind();
  return Kind == tok::eof || Kind == tok::annot_module_begin ||
         Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}

/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;

/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;

/// Initialize all pragma handlers.
void initializePragmaHandlers();

/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();

/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();

/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();

/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();

/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();

void HandlePragmaMSPointersToMembers();

void HandlePragmaMSVtorDisp();

void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
                           SourceLocation PragmaLocation);

/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();

/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();

/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();

/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();

/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();

/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();

/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();

/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();

/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();

/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();

/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();

/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();

/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);

bool ParsePragmaAttributeSubjectMatchRuleSet(
    attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc,
    SourceLocation &LastMatchRuleEndLoc);

void HandlePragmaAttribute();

/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  if (N == 0 || Tok.is(tok::eof))
    return Tok;
  return PP.LookAhead(N - 1);
}

public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() { return PP.LookAhead(0); }

/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
  if (!Tok.getAnnotationValue())
    return TypeError();
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}

private:
static void setTypeAnnotation(Token &Tok, TypeResult T) {
  assert((T.isInvalid() || T.get()) &&
         "produced a valid-but-null type annotation?");
  Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}

static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl *>(Tok.getAnnotationValue());
}

static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}

static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
  return static_cast<IdentifierInfo *>(Tok.getAnnotationValue());
}

static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
  Tok.setAnnotationValue(ND);
}

/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}

/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}

public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                               bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);

bool MightBeCXXScopeToken() {
  return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
         (Tok.is(tok::annot_template_id) &&
          NextToken().is(tok::coloncolon)) ||
         Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}

private:
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};

AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);

/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec,
                     unsigned &DiagID, bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;

  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}

/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
      Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}

bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);

/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}

/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);

/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);

/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;

public:
  explicit TentativeParsingAction(Parser &p) : P(p) {
    PrevPreferredType = P.PreferredType;
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};

/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};

class UnannotatedTentativeParsingAction;

/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
  /// A '(' '{' beginning a statement-expression.
  StmtExprBegin,
  /// A '}' ')' ending a statement-expression.
  StmtExprEnd,
  /// A '[' '[' beginning a C++11 or C2x attribute.
  AttrBegin,
  /// A ']' ']' ending a C++11 or C2x attribute.
  AttrEnd,
  /// A '::' '*' forming a C++ pointer-to-member declaration.
  MemberPtr,
};

/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
                        tok::TokenKind FirstTokKind, CompoundToken Op);

public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();

      // Null Self marks this object as not owning a scope.
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() { Exit(); }
};

/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  unsigned NumScopes = 0;

  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }
  void Exit() {
    while (NumScopes) {
      Self.ExitScope();
      --NumScopes;
    }
  }
  ~MultiParseScope() { Exit(); }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); }

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}

/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;

private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
  virtual void ParseLexedPragmas();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
  void ParseLexedPragmas() override;

private:
  Parser *Self;
  ParsingClass *Class;
};

/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;
  IdentifierInfo &AttrName;
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  SmallVector<Decl *, 2> Decls;

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
  Parser *Self = nullptr;
  AccessSpecifier AS = AS_none;
  CachedTokens Toks;

public:
  explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
      : Self(P), AS(AS) {}

  void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
  const CachedTokens &toks() const { return Toks; }
  AccessSpecifier getAccessSpecifier() const { return AS; }

  void ParseLexedPragmas() override;
};

// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  bool parseSoon() { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};

/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  /// The method definition's declaration.
  Decl *D;
  /// The cached tokens of the method's body.
  CachedTokens Toks;

  explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

  void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(
      Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser *Self;

  /// Method - The method declaration.
  Decl *Method;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;

/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
        TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

/// Returns the innermost class currently being parsed; requires a
/// non-empty ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}

/// RAII object used to manage the parsing of a class definition.
/// Pushes the class on construction; pops it on destruction unless
/// Pop() was called explicitly first.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop on scope exit if the caller didn't pop explicitly.
    if (!Popped)
      P.PopParsingClass(State);
  }
};

/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation.
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};

// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, 
bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords 
recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause); ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause); // Expr that doesn't include commas. 
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); /// Control what ParseCastExpression will parse. enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. 
bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseUniqueStableNameExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. 
Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. 
struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C++ Concepts ExprResult ParseRequiresExpression(); void ParseTrailingRequiresClause(Declarator &D); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator( llvm::function_ref<void(const Designation &)> CodeCompleteCB); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult 
ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt); StmtResult ParseStatementOrDeclaration( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(ParsedStmtContext StmtCtx); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs, ParsedStmtContext StmtCtx); StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx, bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); bool ConsumeNullStmt(StmtVector &Stmts); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK, SourceLocation *LParenLoc = nullptr, 
SourceLocation *RParenLoc = nullptr); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// Parse the block; this code is always used. IEB_Parse, /// Skip the block entirely; this code is never used. IEB_Skip, /// Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// The location of the initial keyword. SourceLocation KeywordLoc; /// Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// Nested-name-specifier preceding the name. CXXScopeSpec SS; /// The name we're looking for. UnqualifiedId Name; /// The behavior of this __if_exists or __if_not_exists block /// should. 
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. 
enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. 
Yes
};

/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
  switch (DSC) {
  // The grammar allows a class/enum definition here, and it can be valid.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
    return AllowDefiningTypeSpec::Yes;

  // The grammar allows a definition here, but it is always invalid and
  // Sema will reject it.
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
    return AllowDefiningTypeSpec::YesButInvalid;

  // Not allowed by the grammar, but we parse one anyway for error recovery.
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
    return AllowDefiningTypeSpec::NoButErrorRecovery;

  // Not allowed at all; a '{' here could mean something else.
  case DeclSpecContext::DSC_trailing:
    return AllowDefiningTypeSpec::No;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
    return true;

  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; struct ForRangeInfo : ForRangeInit { StmtResult LoopVar; }; DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, SourceLocation *DeclSpecStart = nullptr); DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr, SourceLocation *DeclSpecStart = nullptr); bool MightBeDeclarator(DeclaratorContext Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// When in code-completion, skip parsing of the function/method body /// unless 
the body contains the code-completion point. /// /// \returns true if the function body was skipped. bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType, RecordDecl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// Return true if we know that we are definitely looking at a /// decl-specifier, and isn't part of an expression such as a function-style /// cast. Return false if it's no a decl-specifier, or we're not sure. 
bool isKnownToBeDeclarationSpecifier() {
  if (getLangOpts().CPlusPlus)
    // In C++ this requires tentative parsing; only a definite True counts.
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  if (getLangOpts().OpenMP)
    // NOTE(review): presumably primes Sema for a potential OpenMP loop
    // about to be parsed -- confirm against Sema::startOpenMPLoop.
    Actions.startOpenMPLoop();
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // Not C++: no tentative parsing is needed and the result is never
  // ambiguous; a type-id is just a specifier-qualifier sequence.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}

// Convenience overload for callers that don't care about ambiguity.
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;

enum class ConditionOrInitStatement {
  Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Determine whether we could have an enum-base. /// /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise /// only consider this to be an enum-base if the next token is a '{'. /// /// \return \c false if this cannot possibly be an enum base; \c true /// otherwise. bool isEnumBase(bool AllowSemi); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. 
bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); /// Try to skip a possibly empty sequence of 'attribute-specifier's without /// full validation of the syntactic structure of attributes. 
bool TrySkipAttributes(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeName, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? bool standardAttributesAllowed() const { const LangOptions &LO = getLangOpts(); return LO.DoubleSquareBracketAttributes; } // Check for the start of an attribute-specifier-seq in a context where an // attribute is not allowed. bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!standardAttributesAllowed()) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); // FixItLoc = possible correct location for the attributes void ProhibitAttributes(ParsedAttributesWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clear(); } void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clearListOnly(); } void DiagnoseProhibitedAttributes(const SourceRange &Range, SourceLocation FixItLoc); // Forbid C++11 and C2x attributes that appear on certain syntactic locations // which standard permits but 
// we don't support yet; for example, attributes that
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

/// Bitmask selecting which attribute syntaxes ParseAttributes /
/// MaybeParseAttributes should accept.
enum ParseAttrKindMask {
  PAKM_GNU = 1 << 0,
  PAKM_Declspec = 1 << 1,
  PAKM_CXX11 = 1 << 2,
};

/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
///   [[]] __attribute__(()) int i; // OK
///   __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributesWithRange &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr); void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs); Attrs.takeAllFrom(AttrsWithRange); } /// \brief Possibly parse attributes based on what syntaxes are desired, /// allowing for the order to vary. bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributesWithRange &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) || (standardAttributesAllowed() && isCXX11AttributeSpecifier())) { ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs); return true; } return false; } bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) || (standardAttributesAllowed() && isCXX11AttributeSpecifier())) { ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs); return true; } return false; } void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } bool MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParseGNUAttributes(attrs, endLoc, LateAttrs); return true; } return false; } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo 
*AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } bool MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); return true; } return false; } bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) { ParseCXX11Attributes(attrs, endLoc); return true; } return false; } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) { ParseMicrosoftDeclSpecs(Attrs, End); return true; } return false; } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation 
&EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); ExprResult ParseExtIntegerArgument(); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. 
AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); 
//===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType 
ObjectType, bool ObjectHadErrors, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse a `match` clause for an '#pragma omp declare variant'. Return true /// if there was an error. bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI, OMPTraitInfo *ParentTI); /// Parse clauses for '#pragma omp declare variant'. 
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse 'omp [begin] assume[s]' directive. void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parse 'omp end assumes' directive. void ParseOpenMPEndAssumesDirective(SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if /// it is not the current token. void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind); /// Check the \p FoundKind against the \p ExpectedKind, if not issue an error /// that the "end" matching the "begin" directive of kind \p BeginKind was not /// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`. void parseOMPEndDirective(OpenMPDirectiveKind BeginKind, OpenMPDirectiveKind ExpectedKind, OpenMPDirectiveKind FoundKind, SourceLocation MatchingLoc, SourceLocation FoundLoc, bool SkipUntilOpenMPEnd); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. 
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Tries to parse cast part of OpenMP array shaping operation: /// '[' expression ']' { '[' expression ']' } ')'. bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. 
/// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. 
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] 
map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); 
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor 
// code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                               unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;

// Bit-set of the qualifiers that may precede a GNU inline asm statement:
// 'volatile', 'inline' and 'goto'. Stored as a bitmask in Qualifiers.
class GNUAsmQualifiers {
  unsigned Qualifiers = AQ_unspecified;

public:
  enum AQ {
    AQ_unspecified = 0,
    AQ_volatile = 1,
    AQ_inline = 2,
    AQ_goto = 4,
  };
  static const char *getQualifierName(AQ Qualifier);
  bool setAsmQualifier(AQ Qualifier);
  inline bool isVolatile() const { return Qualifiers & AQ_volatile; };
  inline bool isInline() const { return Qualifiers & AQ_inline; };
  inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};

} // end namespace clang

#endif
// matrix.h
/*************************************************************************** * include/stxxl/bits/containers/matrix.h * * Part of the STXXL. See http://stxxl.sourceforge.net * * Copyright (C) 2010-2011 Raoul Steffen <R-Steffen@gmx.de> * * Distributed under the Boost Software License, Version 1.0. * (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) **************************************************************************/ #ifndef STXXL_CONTAINERS_MATRIX_HEADER #define STXXL_CONTAINERS_MATRIX_HEADER #include <stxxl/bits/containers/vector.h> #include <stxxl/bits/common/counting_ptr.h> #include <stxxl/bits/mng/block_scheduler.h> #include <stxxl/bits/containers/matrix_arithmetic.h> STXXL_BEGIN_NAMESPACE //! \defgroup matrix matrix //! Efficient external memory matrix operations //! \ingroup stlcont //! \{ /* index-variable naming convention: * [MODIFIER_][UNIT_]DIMENSION[_in_[MODIFIER_]ENVIRONMENT] * * e.g.: * block_row = number of row measured in rows consisting of blocks * element_row_in_block = number of row measured in rows consisting of elements in the (row of) block(s) * * size-variable naming convention: * [MODIFIER_][ENVIRONMENT_]DIMENSION[_in_UNITs] * * e.g. * height_in_blocks */ // forward declaration template <typename ValueType, unsigned BlockSideLength> class matrix; //! external column-vector container for matrix multiplication //! \tparam ValueType type of contained objects (POD with no references to internal memory) template <typename ValueType> class column_vector : public vector<ValueType> { public: typedef vector<ValueType> vector_type; typedef typename vector_type::size_type size_type; using vector_type::size; //! 
\param n number of elements column_vector(size_type n = 0) : vector_type(n) { } column_vector operator + (const column_vector& right) const { assert(size() == right.size()); column_vector res(size()); for (size_type i = 0; i < size(); ++i) res[i] = (*this)[i] + right[i]; return res; } column_vector operator - (const column_vector& right) const { assert(size() == right.size()); column_vector res(size()); for (size_type i = 0; i < size(); ++i) res[i] = (*this)[i] - right[i]; return res; } column_vector operator * (const ValueType scalar) const { column_vector res(size()); for (size_type i = 0; i < size(); ++i) res[i] = (*this)[i] * scalar; return res; } column_vector& operator += (const column_vector& right) { assert(size() == right.size()); for (size_type i = 0; i < size(); ++i) (*this)[i] += right[i]; return *this; } column_vector& operator -= (const column_vector& right) { assert(size() == right.size()); for (size_type i = 0; i < size(); ++i) (*this)[i] -= right[i]; return *this; } column_vector& operator *= (const ValueType scalar) { for (size_type i = 0; i < size(); ++i) (*this)[i] *= scalar; return *this; } void set_zero() { for (typename vector_type::iterator it = vector_type::begin(); it != vector_type::end(); ++it) *it = 0; } }; //! external row-vector container for matrix multiplication //! \tparam ValueType type of contained objects (POD with no references to internal memory) template <typename ValueType> class row_vector : public vector<ValueType> { public: typedef vector<ValueType> vector_type; typedef typename vector_type::size_type size_type; using vector_type::size; //! 
\param n number of elements row_vector(size_type n = 0) : vector_type(n) { } row_vector operator + (const row_vector& right) const { assert(size() == right.size()); row_vector res(size()); for (size_type i = 0; i < size(); ++i) res[i] = (*this)[i] + right[i]; return res; } row_vector operator - (const row_vector& right) const { assert(size() == right.size()); row_vector res(size()); for (size_type i = 0; i < size(); ++i) res[i] = (*this)[i] - right[i]; return res; } row_vector operator * (const ValueType scalar) const { row_vector res(size()); for (size_type i = 0; i < size(); ++i) res[i] = (*this)[i] * scalar; return res; } template <unsigned BlockSideLength> row_vector operator * (const matrix<ValueType, BlockSideLength>& right) const { return right.multiply_from_left(*this); } ValueType operator * (const column_vector<ValueType>& right) const { ValueType res = 0; for (size_type i = 0; i < size(); ++i) res += (*this)[i] * right[i]; return res; } row_vector& operator += (const row_vector& right) { assert(size() == right.size()); for (size_type i = 0; i < size(); ++i) (*this)[i] += right[i]; return *this; } row_vector& operator -= (const row_vector& right) { assert(size() == right.size()); for (size_type i = 0; i < size(); ++i) (*this)[i] -= right[i]; return *this; } row_vector& operator *= (const ValueType scalar) { for (size_type i = 0; i < size(); ++i) (*this)[i] *= scalar; return *this; } void set_zero() { for (typename vector_type::iterator it = vector_type::begin(); it != vector_type::end(); ++it) *it = 0; } }; //! Specialized swappable_block that interprets uninitialized as containing zeros. //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! \tparam BlockSideLength side length of a matrix block //! //! When initializing, all values are set to zero. 
template <typename ValueType, unsigned BlockSideLength>
class matrix_swappable_block : public swappable_block<ValueType, BlockSideLength* BlockSideLength>
{
public:
    typedef typename swappable_block<ValueType, BlockSideLength* BlockSideLength>::internal_block_type internal_block_type;

    using swappable_block<ValueType, BlockSideLength* BlockSideLength>::get_internal_block;

    //! Write zeros over the whole internal block (BlockSideLength^2 elements),
    //! so an uninitialized block behaves as if it contained all zeros.
    void fill_default()
    {
        // get_internal_block checks acquired
        internal_block_type& data = get_internal_block();
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
        for (int_type row = 0; row < int_type(BlockSideLength); ++row)
            for (int_type col = 0; col < int_type(BlockSideLength); ++col)
                data[row * BlockSideLength + col] = 0;
    }
};

//! External container for a (sub)matrix. Not intended for direct use.
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
//!
//! Stores blocks only, so all measures (height, width, row, col) are in blocks.
template <typename ValueType, unsigned BlockSideLength>
class swappable_block_matrix : public atomic_counted_object
{
public:
    typedef int_type size_type;
    typedef int_type elem_size_type;
    typedef block_scheduler<matrix_swappable_block<ValueType, BlockSideLength> > block_scheduler_type;
    typedef typename block_scheduler_type::swappable_block_identifier_type swappable_block_identifier_type;
    typedef std::vector<swappable_block_identifier_type> blocks_type;
    typedef matrix_local::matrix_operations<ValueType, BlockSideLength> Ops;

    // scheduler that owns the actual block storage; blocks are referenced by identifier
    block_scheduler_type& bs;

private:
    // assigning is not allowed
    swappable_block_matrix& operator = (const swappable_block_matrix& other);

protected:
    //! height of the matrix in blocks
    size_type height,
    //! width of the matrix in blocks
        width,
    //! height copied from supermatrix in blocks
        height_from_supermatrix,
    //! width copied from supermatrix in blocks
        width_from_supermatrix;
    //! the matrice's blocks in row-major
    blocks_type blocks;
    //! if the elements in each block are in col-major instead of row-major
    bool elements_in_blocks_transposed;

    //! get identifier of the block at (row, col)
    swappable_block_identifier_type & bl(const size_type row, const size_type col)
    { return blocks[row * width + col]; }

public:
    //! Create an empty swappable_block_matrix of given dimensions.
    swappable_block_matrix(block_scheduler_type& bs,
                           const size_type height_in_blocks, const size_type width_in_blocks,
                           const bool transposed = false)
        : bs(bs),
          height(height_in_blocks),
          width(width_in_blocks),
          height_from_supermatrix(0),
          width_from_supermatrix(0),
          blocks(height * width),
          elements_in_blocks_transposed(transposed)
    {
        // every block is freshly allocated and therefore owned by this matrix
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
    }

    //! Create swappable_block_matrix of given dimensions that
    //! represents the submatrix of supermatrix starting at (from_row_in_blocks, from_col_in_blocks).
    //!
    //! If supermatrix is not large enough, the submatrix is padded with empty blocks.
    //! The supermatrix must not be destructed or transposed before the submatrix is destructed.
    swappable_block_matrix(const swappable_block_matrix& supermatrix,
                           const size_type height_in_blocks, const size_type width_in_blocks,
                           const size_type from_row_in_blocks, const size_type from_col_in_blocks)
        : bs(supermatrix.bs),
          height(height_in_blocks),
          width(width_in_blocks),
          height_from_supermatrix(std::min(supermatrix.height - from_row_in_blocks, height)),
          width_from_supermatrix(std::min(supermatrix.width - from_col_in_blocks, width)),
          blocks(height * width),
          elements_in_blocks_transposed(supermatrix.elements_in_blocks_transposed)
    {
        // share block identifiers with the supermatrix inside the overlap region,
        // allocate fresh (padding) blocks outside of it
        for (size_type row = 0; row < height_from_supermatrix; ++row)
        {
            for (size_type col = 0; col < width_from_supermatrix; ++col)
                bl(row, col) = supermatrix.block(row + from_row_in_blocks, col + from_col_in_blocks);
            for (size_type col = width_from_supermatrix; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
        }
        for (size_type row = height_from_supermatrix; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
    }

    //! Create swappable_block_matrix that represents the combination matrix ul ur dl dr.
    //!
    //! The submatrices are assumed to be of fitting dimensions and equal transposition.
    //! The submatrices must not be destructed or transposed before the matrix is destructed.
    swappable_block_matrix(const swappable_block_matrix& ul, const swappable_block_matrix& ur,
                           const swappable_block_matrix& dl, const swappable_block_matrix& dr)
        : bs(ul.bs),
          height(ul.height + dl.height),
          width(ul.width + ur.width),
          height_from_supermatrix(height),
          width_from_supermatrix(width),
          blocks(height * width),
          elements_in_blocks_transposed(ul.elements_in_blocks_transposed)
    {
        // all identifiers are borrowed from the four quadrants, none are allocated here
        // (height/width_from_supermatrix == height/width, so the destructor frees nothing)
        for (size_type row = 0; row < ul.height; ++row)
        {
            for (size_type col = 0; col < ul.width; ++col)
                bl(row, col) = ul.block(row, col);
            for (size_type col = ul.width; col < width; ++col)
                bl(row, col) = ur.block(row, col - ul.width);
        }
        for (size_type row = ul.height; row < height; ++row)
        {
            for (size_type col = 0; col < ul.width; ++col)
                bl(row, col) = dl.block(row - ul.height, col);
            for (size_type col = ul.width; col < width; ++col)
                bl(row, col) = dr.block(row - ul.height, col - ul.width);
        }
    }

    //! Deep copy: allocates fresh blocks and copies the element values via Ops.
    swappable_block_matrix(const swappable_block_matrix& other)
        : atomic_counted_object(other),
          bs(other.bs),
          height(other.height),
          width(other.width),
          height_from_supermatrix(0),
          width_from_supermatrix(0),
          blocks(height * width),
          elements_in_blocks_transposed(false)
    {
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bl(row, col) = bs.allocate_swappable_block();
        // 0 + other is copying
        Ops::element_op(*this, other, typename Ops::addition());
    }

    //! Frees only the blocks this matrix allocated itself; blocks shared with a
    //! supermatrix (the *_from_supermatrix region) remain owned by the supermatrix.
    ~swappable_block_matrix()
    {
        for (size_type row = 0; row < height_from_supermatrix; ++row)
        {
            for (size_type col = width_from_supermatrix; col < width; ++col)
                bs.free_swappable_block(bl(row, col));
        }
        for (size_type row = height_from_supermatrix; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bs.free_swappable_block(bl(row, col));
    }

    //! block coordinate containing the given element coordinate
    static size_type block_index_from_elem(elem_size_type index)
    { return index / BlockSideLength; }

    //! element offset inside its block (one dimension, ignores transposition)
    static int_type elem_index_in_block_from_elem(elem_size_type index)
    { return index % BlockSideLength; }

    // regards transposed
    int_type elem_index_in_block_from_elem(elem_size_type row, elem_size_type col) const
    {
        return (is_transposed())
               ? row % BlockSideLength + col % BlockSideLength * BlockSideLength
               : row % BlockSideLength * BlockSideLength + col % BlockSideLength;
    }

    //! get identifier of the block at (row, col)
    const swappable_block_identifier_type & block(const size_type row, const size_type col) const
    { return blocks[row * width + col]; }

    //! get identifier of the block at (row, col)
    const swappable_block_identifier_type& operator () (const size_type row, const size_type col) const
    { return block(row, col); }

    const size_type & get_height() const
    { return height; }

    const size_type & get_width() const
    { return width; }

    //! if the elements inside the blocks are in transposed order i.e. column-major
    const bool & is_transposed() const
    { return elements_in_blocks_transposed; }

    //! Transpose in O(height * width) identifier moves: the grid of blocks is
    //! transposed eagerly, the elements inside each block only by flipping the flag.
    void transpose()
    {
        // transpose matrix of blocks
        blocks_type bn(blocks.size());
        for (size_type row = 0; row < height; ++row)
            for (size_type col = 0; col < width; ++col)
                bn[col * height + row] = bl(row, col);
        bn.swap(blocks);
        // swap dimensions
        std::swap(height, width);
        std::swap(height_from_supermatrix, width_from_supermatrix);
        elements_in_blocks_transposed = ! elements_in_blocks_transposed;
    }

    //! Deinitialize all blocks; uninitialized blocks read as zero (see matrix_swappable_block).
    void set_zero()
    {
        for (typename blocks_type::iterator it = blocks.begin(); it != blocks.end(); ++it)
            bs.deinitialize(*it);
    }
};

//! general iterator type that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//!
\tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class matrix_iterator { protected: typedef matrix<ValueType, BlockSideLength> matrix_type; typedef typename matrix_type::swappable_block_matrix_type swappable_block_matrix_type; typedef typename matrix_type::block_scheduler_type block_scheduler_type; typedef typename block_scheduler_type::internal_block_type internal_block_type; typedef typename matrix_type::elem_size_type elem_size_type; typedef typename matrix_type::block_size_type block_size_type; template <typename VT, unsigned BSL> friend class matrix; template <typename VT, unsigned BSL> friend class const_matrix_iterator; matrix_type* m; elem_size_type current_row, // \ both indices == -1 <=> empty iterator current_col; // / block_size_type current_block_row, current_block_col; internal_block_type* current_iblock; // NULL if block is not acquired void acquire_current_iblock() { if (! current_iblock) current_iblock = &m->data->bs.acquire(m->data->block(current_block_row, current_block_col)); } void release_current_iblock() { if (current_iblock) { m->data->bs.release(m->data->block(current_block_row, current_block_col), true); current_iblock = 0; } } //! create iterator pointing to given row and col matrix_iterator(matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : m(&matrix), current_row(start_row), current_col(start_col), current_block_row(m->data->block_index_from_elem(start_row)), current_block_col(m->data->block_index_from_elem(start_col)), current_iblock(0) { } //! 
create empty iterator matrix_iterator(matrix_type& matrix) : m(&matrix), current_row(-1), // empty iterator current_col(-1), current_block_row(-1), current_block_col(-1), current_iblock(0) { } void set_empty() { release_current_iblock(); current_row = -1; current_col = -1; current_block_row = -1; current_block_col = -1; } public: matrix_iterator(const matrix_iterator& other) : m(other.m), current_row(other.current_row), current_col(other.current_col), current_block_row(other.current_block_row), current_block_col(other.current_block_col), current_iblock(0) { if (other.current_iblock) acquire_current_iblock(); } matrix_iterator& operator = (const matrix_iterator& other) { set_pos(other.current_row, other.current_col); m = other.m; if (other.current_iblock) acquire_current_iblock(); return *this; } ~matrix_iterator() { release_current_iblock(); } void set_row(const elem_size_type new_row) { const block_size_type new_block_row = m->data->block_index_from_elem(new_row); if (new_block_row != current_block_row) { release_current_iblock(); current_block_row = new_block_row; } current_row = new_row; } void set_col(const elem_size_type new_col) { const block_size_type new_block_col = m->data->block_index_from_elem(new_col); if (new_block_col != current_block_col) { release_current_iblock(); current_block_col = new_block_col; } current_col = new_col; } void set_pos(const elem_size_type new_row, const elem_size_type new_col) { const block_size_type new_block_row = m->data->block_index_from_elem(new_row), new_block_col = m->data->block_index_from_elem(new_col); if (new_block_col != current_block_col || new_block_row != current_block_row) { release_current_iblock(); current_block_row = new_block_row; current_block_col = new_block_col; } current_row = new_row; current_col = new_col; } void set_pos(const std::pair<elem_size_type, elem_size_type> new_pos) { set_pos(new_pos.first, new_pos.second); } const elem_size_type & get_row() const { return current_row; } const elem_size_type 
& get_col() const { return current_col; } std::pair<elem_size_type, elem_size_type> get_pos() const { return std::make_pair(current_row, current_col); } bool empty() const { return current_row == -1 && current_col == -1; } operator bool () const { return ! empty(); } bool operator == (const matrix_iterator& other) const { return current_row == other.current_row && current_col == other.current_col && m == other.m; } //! Returns reference access to the element referenced by the iterator. //! The reference is only valid so long as the iterator is not moved. ValueType& operator * () { acquire_current_iblock(); return (*current_iblock)[m->data->elem_index_in_block_from_elem(current_row, current_col)]; } }; //! row-major iterator that points to single elements inside a matrix //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! \tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class matrix_row_major_iterator : public matrix_iterator<ValueType, BlockSideLength> { protected: typedef matrix_iterator<ValueType, BlockSideLength> matrix_iterator_type; typedef typename matrix_iterator_type::matrix_type matrix_type; typedef typename matrix_iterator_type::elem_size_type elem_size_type; template <typename VT, unsigned BSL> friend class matrix; using matrix_iterator_type::m; using matrix_iterator_type::set_empty; //! create iterator pointing to given row and col matrix_row_major_iterator(matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : matrix_iterator_type(matrix, start_row, start_col) { } //! create empty iterator matrix_row_major_iterator(matrix_type& matrix) : matrix_iterator_type(matrix) { } public: //! convert from matrix_iterator matrix_row_major_iterator(const matrix_iterator_type& matrix_iterator) : matrix_iterator_type(matrix_iterator) { } // Has to be not empty, else behavior is undefined. 
// Pre-increment in row-major order: move right within the row, then wrap to
// the beginning of the next row; past the last element -> empty state.
matrix_row_major_iterator& operator ++ ()
{
    if (get_col() + 1 < m->get_width())
        // => not at the end of row, move right
        set_col(get_col() + 1);
    else if (get_row() + 1 < m->get_height())
        // => at end of row but not last row, move to beginning of next row
        set_pos(get_row() + 1, 0);
    else
        // => at end of matrix, set to empty-state
        set_empty();
    return *this;
}

// Has to be not empty, else behavior is undefined.
// Pre-decrement in row-major order: move left within the row, then wrap to
// the end of the previous row; before the first element -> empty state.
matrix_row_major_iterator& operator -- ()
{
    if (get_col() - 1 >= 0)
        // => not at the beginning of row, move left
        set_col(get_col() - 1);
    else if (get_row() - 1 >= 0)
        // => at beginning of row but not first row, move to end of previous row
        set_pos(get_row() - 1, m->get_width() - 1);
    else
        // => at beginning of matrix, set to empty-state
        set_empty();
    return *this;
}

using matrix_iterator_type::get_row;
using matrix_iterator_type::get_col;
using matrix_iterator_type::set_col;
using matrix_iterator_type::set_pos;
};

//! column-major iterator that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//! \tparam BlockSideLength side length of a matrix block
template <typename ValueType, unsigned BlockSideLength>
class matrix_col_major_iterator : public matrix_iterator<ValueType, BlockSideLength>
{
protected:
    typedef matrix_iterator<ValueType, BlockSideLength> matrix_iterator_type;
    typedef typename matrix_iterator_type::matrix_type matrix_type;
    typedef typename matrix_iterator_type::elem_size_type elem_size_type;

    template <typename VT, unsigned BSL>
    friend class matrix;

    using matrix_iterator_type::m;
    using matrix_iterator_type::set_empty;

    //! create iterator pointing to given row and col
    matrix_col_major_iterator(matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col)
        : matrix_iterator_type(matrix, start_row, start_col) { }

    //! create empty iterator
    matrix_col_major_iterator(matrix_type& matrix)
        : matrix_iterator_type(matrix) { }

public:
    //! convert from matrix_iterator
    matrix_col_major_iterator(const matrix_iterator_type& matrix_iterator)
        : matrix_iterator_type(matrix_iterator) { }

    // Has to be not empty, else behavior is undefined.
    // Pre-increment in column-major order: move down within the column, then
    // wrap to the top of the next column; past the last element -> empty state.
    matrix_col_major_iterator& operator ++ ()
    {
        if (get_row() + 1 < m->get_height())
            // => not at the end of col, move down
            set_row(get_row() + 1);
        else if (get_col() + 1 < m->get_width())
            // => at end of col but not last col, move to beginning of next col
            set_pos(0, get_col() + 1);
        else
            // => at end of matrix, set to empty-state
            set_empty();
        return *this;
    }

    // Has to be not empty, else behavior is undefined.
    // Pre-decrement in column-major order: move up within the column, then
    // wrap to the bottom of the previous column; before the first element ->
    // empty state.
    matrix_col_major_iterator& operator -- ()
    {
        if (get_row() - 1 >= 0)
            // => not at the beginning of col, move up
            set_row(get_row() - 1);
        else if (get_col() - 1 >= 0)
            // => at beginning of col but not first col, move to end of previous col
            set_pos(m->get_height() - 1, get_col() - 1);
        else
            // => at beginning of matrix, set to empty-state
            set_empty();
        return *this;
    }

    using matrix_iterator_type::get_row;
    using matrix_iterator_type::get_col;
    using matrix_iterator_type::set_row;
    using matrix_iterator_type::set_pos;
};

//! general const_iterator type that points to single elements inside a matrix
//! \tparam ValueType type of contained objects (POD with no references to internal memory)
//!
\tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class const_matrix_iterator { protected: typedef matrix<ValueType, BlockSideLength> matrix_type; typedef typename matrix_type::swappable_block_matrix_type swappable_block_matrix_type; typedef typename matrix_type::block_scheduler_type block_scheduler_type; typedef typename block_scheduler_type::internal_block_type internal_block_type; typedef typename matrix_type::elem_size_type elem_size_type; typedef typename matrix_type::block_size_type block_size_type; template <typename VT, unsigned BSL> friend class matrix; const matrix_type* m; elem_size_type current_row, // \ both indices == -1 <=> empty iterator current_col; // / block_size_type current_block_row, current_block_col; internal_block_type* current_iblock; // NULL if block is not acquired void acquire_current_iblock() { if (! current_iblock) current_iblock = &m->data->bs.acquire(m->data->block(current_block_row, current_block_col)); } void release_current_iblock() { if (current_iblock) { m->data->bs.release(m->data->block(current_block_row, current_block_col), false); current_iblock = 0; } } //! create iterator pointing to given row and col const_matrix_iterator(const matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : m(&matrix), current_row(start_row), current_col(start_col), current_block_row(m->data->block_index_from_elem(start_row)), current_block_col(m->data->block_index_from_elem(start_col)), current_iblock(0) { } //! 
create empty iterator const_matrix_iterator(const matrix_type& matrix) : m(&matrix), current_row(-1), // empty iterator current_col(-1), current_block_row(-1), current_block_col(-1), current_iblock(0) { } void set_empty() { release_current_iblock(); current_row = -1; current_col = -1; current_block_row = -1; current_block_col = -1; } public: const_matrix_iterator(const matrix_iterator<ValueType, BlockSideLength>& other) : m(other.m), current_row(other.current_row), current_col(other.current_col), current_block_row(other.current_block_row), current_block_col(other.current_block_col), current_iblock(0) { if (other.current_iblock) acquire_current_iblock(); } const_matrix_iterator(const const_matrix_iterator& other) : m(other.m), current_row(other.current_row), current_col(other.current_col), current_block_row(other.current_block_row), current_block_col(other.current_block_col), current_iblock(0) { if (other.current_iblock) acquire_current_iblock(); } const_matrix_iterator& operator = (const const_matrix_iterator& other) { set_pos(other.current_row, other.current_col); m = other.m; if (other.current_iblock) acquire_current_iblock(); return *this; } ~const_matrix_iterator() { release_current_iblock(); } void set_row(const elem_size_type new_row) { const block_size_type new_block_row = m->data->block_index_from_elem(new_row); if (new_block_row != current_block_row) { release_current_iblock(); current_block_row = new_block_row; } current_row = new_row; } void set_col(const elem_size_type new_col) { const block_size_type new_block_col = m->data->block_index_from_elem(new_col); if (new_block_col != current_block_col) { release_current_iblock(); current_block_col = new_block_col; } current_col = new_col; } void set_pos(const elem_size_type new_row, const elem_size_type new_col) { const block_size_type new_block_row = m->data->block_index_from_elem(new_row), new_block_col = m->data->block_index_from_elem(new_col); if (new_block_col != current_block_col || new_block_row != 
current_block_row) { release_current_iblock(); current_block_row = new_block_row; current_block_col = new_block_col; } current_row = new_row; current_col = new_col; } void set_pos(const std::pair<elem_size_type, elem_size_type> new_pos) { set_pos(new_pos.first, new_pos.second); } const elem_size_type & get_row() const { return current_row; } const elem_size_type & get_col() const { return current_col; } std::pair<elem_size_type, elem_size_type> get_pos() const { return std::make_pair(current_row, current_col); } bool empty() const { return current_row == -1 && current_col == -1; } operator bool () const { return ! empty(); } bool operator == (const const_matrix_iterator& other) const { return current_row == other.current_row && current_col == other.current_col && m == other.m; } //! Returns reference access to the element referenced by the iterator. //! The reference is only valid so long as the iterator is not moved. const ValueType& operator * () { acquire_current_iblock(); return (*current_iblock)[m->data->elem_index_in_block_from_elem(current_row, current_col)]; } }; //! row-major const_iterator that points to single elements inside a matrix //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! \tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class const_matrix_row_major_iterator : public const_matrix_iterator<ValueType, BlockSideLength> { protected: typedef const_matrix_iterator<ValueType, BlockSideLength> const_matrix_iterator_type; typedef typename const_matrix_iterator_type::matrix_type matrix_type; typedef typename const_matrix_iterator_type::elem_size_type elem_size_type; template <typename VT, unsigned BSL> friend class matrix; using const_matrix_iterator_type::m; using const_matrix_iterator_type::set_empty; //! 
create iterator pointing to given row and col const_matrix_row_major_iterator(const matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : const_matrix_iterator_type(matrix, start_row, start_col) { } //! create empty iterator const_matrix_row_major_iterator(const matrix_type& matrix) : const_matrix_iterator_type(matrix) { } public: //! convert from matrix_iterator const_matrix_row_major_iterator(const const_matrix_row_major_iterator& matrix_iterator) : const_matrix_iterator_type(matrix_iterator) { } //! convert from matrix_iterator const_matrix_row_major_iterator(const const_matrix_iterator_type& matrix_iterator) : const_matrix_iterator_type(matrix_iterator) { } // Has to be not empty, else behavior is undefined. const_matrix_row_major_iterator& operator ++ () { if (get_col() + 1 < m->get_width()) // => not matrix_row_major_iterator the end of row, move right set_col(get_col() + 1); else if (get_row() + 1 < m->get_height()) // => at end of row but not last row, move to beginning of next row set_pos(get_row() + 1, 0); else // => at end of matrix, set to empty-state set_empty(); return *this; } // Has to be not empty, else behavior is undefined. const_matrix_row_major_iterator& operator -- () { if (get_col() - 1 >= 0) // => not at the beginning of row, move left set_col(get_col() - 1); else if (get_row() - 1 >= 0) // => at beginning of row but not first row, move to end of previous row set_pos(get_row() - 1, m->get_width() - 1); else // => at beginning of matrix, set to empty-state set_empty(); return *this; } using const_matrix_iterator_type::get_row; using const_matrix_iterator_type::get_col; using const_matrix_iterator_type::set_col; using const_matrix_iterator_type::set_pos; }; //! column-major const_iterator that points to single elements inside a matrix //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! 
\tparam BlockSideLength side length of a matrix block template <typename ValueType, unsigned BlockSideLength> class const_matrix_col_major_iterator : public const_matrix_iterator<ValueType, BlockSideLength> { protected: typedef const_matrix_iterator<ValueType, BlockSideLength> const_matrix_iterator_type; typedef typename const_matrix_iterator_type::matrix_type matrix_type; typedef typename const_matrix_iterator_type::elem_size_type elem_size_type; template <typename VT, unsigned BSL> friend class matrix; using const_matrix_iterator_type::m; using const_matrix_iterator_type::set_empty; //! create iterator pointing to given row and col const_matrix_col_major_iterator(const matrix_type& matrix, const elem_size_type start_row, const elem_size_type start_col) : const_matrix_iterator_type(matrix, start_row, start_col) { } //! create empty iterator const_matrix_col_major_iterator(const matrix_type& matrix) : const_matrix_iterator_type(matrix) { } public: //! convert from matrix_iterator const_matrix_col_major_iterator(const matrix_iterator<ValueType, BlockSideLength>& matrix_iterator) : const_matrix_iterator_type(matrix_iterator) { } //! convert from matrix_iterator const_matrix_col_major_iterator(const const_matrix_iterator_type& matrix_iterator) : const_matrix_iterator_type(matrix_iterator) { } // Has to be not empty, else behavior is undefined. const_matrix_col_major_iterator& operator ++ () { if (get_row() + 1 < m->get_height()) // => not at the end of col, move down set_row(get_row() + 1); else if (get_col() + 1 < m->get_width()) // => at end of col but not last col, move to beginning of next col set_pos(0, get_col() + 1); else // => at end of matrix, set to empty-state set_empty(); return *this; } // Has to be not empty, else behavior is undefined. 
const_matrix_col_major_iterator& operator -- () { if (get_row() - 1 >= 0) // => not at the beginning of col, move up set_row(get_row() - 1); else if (get_col() - 1 >= 0) // => at beginning of col but not first col, move to end of previous col set_pos(m->get_height() - 1, get_col() - 1); else // => at beginning of matrix, set to empty-state set_empty(); return *this; } using const_matrix_iterator_type::get_row; using const_matrix_iterator_type::get_col; using const_matrix_iterator_type::set_row; using const_matrix_iterator_type::set_pos; }; //! External matrix container. \n //! <b> Introduction </b> to matrix container: see \ref tutorial_matrix tutorial. \n //! <b> Design and Internals </b> of matrix container: see \ref design_matrix. //! //! \tparam ValueType type of contained objects (POD with no references to internal memory) //! \tparam BlockSideLength side length of a matrix block //! //! Divides the matrix in square submatrices (blocks). //! Blocks can be swapped individually to and from external memory. //! They are only swapped if necessary to minimize I/O. 
template <typename ValueType, unsigned BlockSideLength> class matrix { protected: typedef matrix<ValueType, BlockSideLength> matrix_type; typedef swappable_block_matrix<ValueType, BlockSideLength> swappable_block_matrix_type; typedef counting_ptr<swappable_block_matrix_type> swappable_block_matrix_pointer_type; typedef typename swappable_block_matrix_type::block_scheduler_type block_scheduler_type; typedef typename swappable_block_matrix_type::size_type block_size_type; typedef typename swappable_block_matrix_type::elem_size_type elem_size_type; typedef matrix_local::matrix_operations<ValueType, BlockSideLength> Ops; typedef matrix_swappable_block<ValueType, BlockSideLength> swappable_block_type; public: typedef matrix_iterator<ValueType, BlockSideLength> iterator; typedef const_matrix_iterator<ValueType, BlockSideLength> const_iterator; typedef matrix_row_major_iterator<ValueType, BlockSideLength> row_major_iterator; typedef matrix_col_major_iterator<ValueType, BlockSideLength> col_major_iterator; typedef const_matrix_row_major_iterator<ValueType, BlockSideLength> const_row_major_iterator; typedef const_matrix_col_major_iterator<ValueType, BlockSideLength> const_col_major_iterator; typedef column_vector<ValueType> column_vector_type; typedef row_vector<ValueType> row_vector_type; protected: template <typename VT, unsigned BSL> friend class matrix_iterator; template <typename VT, unsigned BSL> friend class const_matrix_iterator; elem_size_type height, width; swappable_block_matrix_pointer_type data; public: //! \name Constructors/Destructors //! \{ //! Creates a new matrix of given dimensions. Elements' values are set to zero. //! \param bs block scheduler used //! \param height height of the created matrix //! 
\param width width of the created matrix matrix(block_scheduler_type& bs, const elem_size_type height, const elem_size_type width) : height(height), width(width), data( new swappable_block_matrix_type( bs, div_ceil(height, BlockSideLength), div_ceil(width, BlockSideLength)) ) { } matrix(block_scheduler_type& bs, const column_vector_type& left, const row_vector_type& right) : height((elem_size_type)left.size()), width((elem_size_type)right.size()), data( new swappable_block_matrix_type( bs, div_ceil(height, BlockSideLength), div_ceil(width, BlockSideLength)) ) { Ops::recursive_matrix_from_vectors(*data, left, right); } ~matrix() { } //! \} //! \name Capacity //! \{ const elem_size_type & get_height() const { return height; } const elem_size_type & get_width() const { return width; } //! \} //! \name Iterators //! \{ iterator begin() { data.unify(); return iterator(*this, 0, 0); } const_iterator begin() const { return const_iterator(*this, 0, 0); } const_iterator cbegin() const { return const_iterator(*this, 0, 0); } iterator end() { data.unify(); return iterator(*this); } const_iterator end() const { return const_iterator(*this); } const_iterator cend() const { return const_iterator(*this); } const_iterator operator () (const elem_size_type row, const elem_size_type col) const { return const_iterator(*this, row, col); } iterator operator () (const elem_size_type row, const elem_size_type col) { data.unify(); return iterator(*this, row, col); } //! \} //! \name Modifiers //! \{ void transpose() { data.unify(); data->transpose(); std::swap(height, width); } void set_zero() { if (data.unique()) data->set_zero(); else data = new swappable_block_matrix_type (data->bs, div_ceil(height, BlockSideLength), div_ceil(width, BlockSideLength)); } //! \} //! \name Operations //! 
\{ matrix_type operator + (const matrix_type& right) const { assert(height == right.height && width == right.width); matrix_type res(data->bs, height, width); Ops::element_op(*res.data, *data, *right.data, typename Ops::addition()); // more efficient than copying this and then adding right return res; } matrix_type operator - (const matrix_type& right) const { assert(height == right.height && width == right.width); matrix_type res(data->bs, height, width); Ops::element_op(*res.data, *data, *right.data, typename Ops::subtraction()); // more efficient than copying this and then subtracting right return res; } matrix_type operator * (const matrix_type& right) const { return multiply(right); } matrix_type operator * (const ValueType scalar) const { matrix_type res(data->bs, height, width); Ops::element_op(*res.data, *data, typename Ops::scalar_multiplication(scalar)); return res; } matrix_type& operator += (const matrix_type& right) { assert(height == right.height && width == right.width); data.unify(); Ops::element_op(*data, *right.data, typename Ops::addition()); return *this; } matrix_type& operator -= (const matrix_type& right) { assert(height == right.height && width == right.width); data.unify(); Ops::element_op(*data, *right.data, typename Ops::subtraction()); return *this; } matrix_type& operator *= (const matrix_type& right) { return *this = operator * (right); } // implicitly unifies by constructing a result-matrix matrix_type& operator *= (const ValueType scalar) { data.unify(); Ops::element_op(*data, typename Ops::scalar_multiplication(scalar)); return *this; } column_vector_type operator * (const column_vector_type& right) const { assert(elem_size_type(right.size()) == width); column_vector_type res(height); res.set_zero(); Ops::recursive_matrix_col_vector_multiply_and_add(*data, right, res); return res; } row_vector_type multiply_from_left(const row_vector_type& left) const { assert(elem_size_type(left.size()) == height); row_vector_type res(width); 
res.set_zero(); Ops::recursive_matrix_row_vector_multiply_and_add(left, *data, res); return res; } //! multiply with another matrix //! \param right matrix to multiply with //! \param multiplication_algorithm allows to choose the applied algorithm //! \param scheduling_algorithm allows to choose the applied algorithm //! //! Available algorithms are: \n //! 0: naive_multiply_and_add (I/O inefficient, slow) \n //! 1: recursive_multiply_and_add (recommended, default, stable time and I/O complexity) \n //! 2: strassen_winograd_multiply_and_add (sometimes fast but unstable time and I/O complexity) \n //! 3: multi_level_strassen_winograd_multiply_and_add (sometimes fast but unstable time and I/O complexity) \n //! 4: strassen_winograd_multiply, optimized pre- and postadditions (sometimes fast but unstable time and I/O complexity) \n //! 5: strassen_winograd_multiply_and_add_interleaved, optimized preadditions (sometimes fast but unstable time and I/O complexity) \n //! 6: multi_level_strassen_winograd_multiply_and_add_block_grained (sometimes fast but unstable time and I/O complexity) matrix_type multiply(const matrix_type& right, const int_type multiplication_algorithm = 1, const int_type scheduling_algorithm = 2) const { assert(width == right.height); assert(&data->bs == &right.data->bs); matrix_type res(data->bs, height, right.width); if (scheduling_algorithm > 0) { // all offline algos need a simulation-run delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_simulation<swappable_block_type>(data->bs) ); switch (multiplication_algorithm) { case 0: Ops::naive_multiply_and_add(*data, *right.data, *res.data); break; case 1: Ops::recursive_multiply_and_add(*data, *right.data, *res.data); break; case 2: Ops::strassen_winograd_multiply_and_add(*data, *right.data, *res.data); break; case 3: Ops::multi_level_strassen_winograd_multiply_and_add(*data, *right.data, *res.data); break; case 4: Ops::strassen_winograd_multiply(*data, *right.data, *res.data); break; 
case 5: Ops::strassen_winograd_multiply_and_add_interleaved(*data, *right.data, *res.data); break; case 6: Ops::multi_level_strassen_winograd_multiply_and_add_block_grained(*data, *right.data, *res.data); break; default: STXXL_ERRMSG("invalid multiplication-algorithm number"); break; } } switch (scheduling_algorithm) { case 0: delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs) ); break; case 1: delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_offline_lfd<swappable_block_type>(data->bs) ); break; case 2: delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_offline_lru_prefetching<swappable_block_type>(data->bs) ); break; default: STXXL_ERRMSG("invalid scheduling-algorithm number"); } switch (multiplication_algorithm) { case 0: Ops::naive_multiply_and_add(*data, *right.data, *res.data); break; case 1: Ops::recursive_multiply_and_add(*data, *right.data, *res.data); break; case 2: Ops::strassen_winograd_multiply_and_add(*data, *right.data, *res.data); break; case 3: Ops::multi_level_strassen_winograd_multiply_and_add(*data, *right.data, *res.data); break; case 4: Ops::strassen_winograd_multiply(*data, *right.data, *res.data); break; case 5: Ops::strassen_winograd_multiply_and_add_interleaved(*data, *right.data, *res.data); break; case 6: Ops::multi_level_strassen_winograd_multiply_and_add_block_grained(*data, *right.data, *res.data); break; default: STXXL_ERRMSG("invalid multiplication-algorithm number"); break; } delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs) ); return res; } //! Use internal memory multiplication. Designated for testing. May exceed memory limitations. 
matrix_type multiply_internal(const matrix_type& right, const int_type scheduling_algorithm = 2) const { assert(width == right.height); assert(&data->bs == &right.data->bs); matrix_type res(data->bs, height, right.width); if (scheduling_algorithm > 0) { // all offline algos need a simulation-run delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_simulation<swappable_block_type>(data->bs) ); multiply_internal(right, res); } switch (scheduling_algorithm) { case 0: delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs) ); break; case 1: delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_offline_lfd<swappable_block_type>(data->bs) ); break; case 2: delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_offline_lru_prefetching<swappable_block_type>(data->bs) ); break; default: STXXL_ERRMSG("invalid scheduling-algorithm number"); } multiply_internal(right, res); delete data->bs.switch_algorithm_to( new block_scheduler_algorithm_online_lru<swappable_block_type>(data->bs) ); return res; } //! \} protected: void multiply_internal(const matrix_type& right, matrix_type& res) const { ValueType* A = new ValueType[height * width]; ValueType* B = new ValueType[right.height * right.width]; ValueType* C = new ValueType[res.height * res.width]; ValueType* vit; vit = A; for (const_row_major_iterator mit = cbegin(); mit != cend(); ++mit, ++vit) *vit = *mit; vit = B; for (const_row_major_iterator mit = right.cbegin(); mit != right.cend(); ++mit, ++vit) *vit = *mit; if (! res.data->bs.is_simulating()) { #if STXXL_BLAS gemm_wrapper(height, width, res.width, ValueType(1), false, A, false, B, ValueType(0), false, C); #else assert(false /* internal multiplication is only available for testing with blas */); #endif } vit = C; for (row_major_iterator mit = res.begin(); mit != res.end(); ++mit, ++vit) *mit = *vit; delete[] A; delete[] B; delete[] C; } }; //! 
\} STXXL_END_NAMESPACE #endif // !STXXL_CONTAINERS_MATRIX_HEADER // vim: et:ts=4:sw=4
proc-tst-omp.c
/* * Oracle Linux DTrace. * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. * Licensed under the Universal Permissive License v 1.0 as shown at * http://oss.oracle.com/licenses/upl. */ #include <stdio.h> int main(int argc, char **argv) { printf("TEST: start\n"); #pragma omp parallel num_threads(2) { printf("TEST: underway\n"); } return 0; }
GB_binop__isgt_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__isgt_int16)
// A.*B function (eWiseMult):     GB (_AemultB_08__isgt_int16)
// A.*B function (eWiseMult):     GB (_AemultB_02__isgt_int16)
// A.*B function (eWiseMult):     GB (_AemultB_04__isgt_int16)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__isgt_int16)
// A*D function (colscale):       GB (_AxD__isgt_int16)
// D*A function (rowscale):       GB (_DxB__isgt_int16)
// C+=B function (dense accum):   GB (_Cdense_accumB__isgt_int16)
// C+=b function (dense accum):   GB (_Cdense_accumb__isgt_int16)
// C+=A+B function (dense ewise3):     GB ((none))
// C=A+B function (dense ewise3):      GB (_Cdense_ewise3_noaccum__isgt_int16)
// C=scalar+B     GB (_bind1st__isgt_int16)
// C=scalar+B'    GB (_bind1st_tran__isgt_int16)
// C=A+scalar     GB (_bind2nd__isgt_int16)
// C=A'+scalar    GB (_bind2nd_tran__isgt_int16)

// C type:    int16_t
// A type:    int16_t
// A pattern? 0
// B type:    int16_t
// B pattern? 0

// BinaryOp: cij = (aij > bij)

// type of the entries of the A matrix
#define GB_ATYPE \
    int16_t

// type of the entries of the B matrix
#define GB_BTYPE \
    int16_t

// type of the entries of the C matrix
#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// C(i,j) entry, used by the included kernel templates
#define GB_CX(p) Cx [p]

// binary operator: z = (x > y), the ISGT operator on int16
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isgt_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isgt_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isgt_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isgt_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t 
*restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isgt_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isgt_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isgt_int16) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isgt_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isgt_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isgt_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isgt_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isgt_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__isgt_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__isgt_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_for_firstprivate.c
// RUN: %libomp-compile-and-run // REQUIRES: !(abt && (clang || gcc)) #include <stdio.h> #include <math.h> #include "omp_testsuite.h" int sum1; #pragma omp threadprivate(sum1) int test_omp_for_firstprivate() { int sum; int sum0; int known_sum; int threadsnum; sum = 0; sum0 = 12345; sum1 = 0; #pragma omp parallel { #pragma omp single { threadsnum=omp_get_num_threads(); } /* sum0 = 0; */ int i; #pragma omp for firstprivate(sum0) for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum0 + i; sum1 = sum0; } /* end of for */ #pragma omp critical { sum = sum + sum1; } /* end of critical */ } /* end of parallel */ known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return (known_sum == sum); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_firstprivate()) { num_failed++; } } return num_failed; }
DRACC_OMP_003_Counter_no_lock_yes.c
/*
 * Concurrent access on a counter with no lock.  Atomicity violation:
 * the unsynchronized increment of countervar races both between and within
 * the target teams (inter and intra region).  NOTE(review): this race is
 * INTENTIONAL -- the file is a data-race benchmark case ("_yes" = race
 * present), so it must NOT be "fixed" with an atomic or reduction.
 * (The original, unmangled source documents the race at line 14, i.e. the
 * countervar++ statement below.)
 */
#include <stdio.h>

#define N 100000

int countervar = 0;

// Offloads N unsynchronized increments of the global counter to device 0.
// Always returns 0; the final value of countervar is nondeterministic
// whenever more than one device thread executes the loop.
int count(){
    #pragma omp target map(tofrom:countervar) device(0)
    #pragma omp teams distribute parallel for
    for (int i=0; i<N; i++){
        countervar++;   // racy read-modify-write, deliberately unprotected
    }
    return 0;
}

int main(){
    count();
    // The printed counter typically differs from 100000 because of the race.
    printf("counter: %i expected: 100000\n ",countervar);
    return 0;
}
learn.h
#pragma once #include <iostream> #include <sstream> #include <fstream> #include <cstdlib> #include <time.h> #include <stdio.h> #include <vector> #include <omp.h> #include <string> #include "Grid.h" #include "COOGrid.h" #include "FileWriter.h" #include <string> // This file produces execution time .dat files to evaluate the time performance // of different operations of the game and the impact of their parallelisation double run_time, start_time; double total_time = 0.0; // This function produces execution time series when // providing output in parallel and serial cases void output_analysis(int dim, int n_cores, bool write_or_print, int max_steps) { // Initialise file to write execution time for parallelised output std::string par_name; if (write_or_print) { par_name = "parallel_FW_time.dat"; std::fstream fp; fp.open(par_name, std::fstream::out | std::fstream::trunc); fp.close(); // create a random grid Grid grid = Grid(dim, dim, true, n_cores); // save strings from previous n_cores iterations std::vector<std::string> string_grids(n_cores, ""); // Counter initialised. Indicates iterations after last output int cnt = 0; // Start clock for (int n = 0; n < max_steps; n++) { start_time = omp_get_wtime(); string_grids[cnt] = grid.data; // Every number of iterations equal to the number of cores used if ((n + 1) % n_cores == 0) { // Each thread will perform output operations on a previously // stored iteration of the grid omp_set_num_threads(n_cores); // if this is the last iteration and number of iterations is not a multiple // of number of threads used, the for loop is restricted to the remainder int end = n == max_steps - 1 && max_steps % n_cores != 0 ? 
((n + 1) % n_cores) : n_cores; #pragma omp parallel for for (int i = 0; i < end; i++) { // Depending on option chosen, write grid // to .dat, or print .bmp image grid_to_file(i + (n + 1) - end, string_grids[i], dim, dim); } // Restart counter cnt = -1; } // Record time at time-step run_time = omp_get_wtime() - start_time; total_time += run_time; time_data_to_file(par_name, n, dim, dim, total_time); cnt++; } // Initialise file to write execution time for serial output std::string ser_name = "serial_FW_time.dat"; std::fstream fs; fs.open(ser_name, std::fstream::out | std::fstream::trunc); fs.close(); // Start clock start_time = omp_get_wtime(); for (int n = 0; n < max_steps; n++) { grid.to_file(n); // Record time at time-step run_time = omp_get_wtime() - start_time; time_data_to_file(ser_name, n, dim, dim, run_time); } } else { par_name = "parallel_IP_time.dat"; std::fstream fp; fp.open(par_name, std::fstream::out | std::fstream::trunc); fp.close(); // create a random grid Grid grid = Grid(dim, dim, true, n_cores); // Vectors containing n_cores number of grids // from previous iterations get initialised std::vector<bool> store_grids; store_grids.resize(grid.cells.size() * n_cores); // Counter initialised. Indicates iterations after last output int cnt = 0; // Start clock for (int n = 0; n < max_steps; n++) { start_time = omp_get_wtime(); // if this is the last iteration and number of iterations is not a multiple // of number of threads used, the for loop is restricted to the remainder int end = n == max_steps - 1 && max_steps % n_cores != 0 ? 
((n + 1) % n_cores) : n_cores; // Assign grid of current iteration to vector of // previous iterations omp_set_num_threads(n_cores); #pragma omp parallel for for (int i = cnt * grid.cells.size(); i < ((cnt + 1) * grid.cells.size()); i++) { store_grids[i] = grid.cells[i % grid.cells.size()]; } // Every number of iterations equal to the number of cores used if ((n + 1) % n_cores == 0) { // Each thread will perform output operations on a previously // stored iteration of the grid omp_set_num_threads(n_cores); #pragma omp parallel for for (int i = 0; i < end; i++) { // Each thread takes one grid from a previous iteration vector<bool>::const_iterator first = store_grids.begin() + i * grid.cells.size(); vector<bool>::const_iterator last = store_grids.begin() + (i + 1) * grid.cells.size(); vector<bool> v(first, last); print_IMG(v, dim, dim, i + (n + 1) - end); } // Restart counter cnt = -1; } // Record time at time-step run_time = omp_get_wtime() - start_time; total_time += run_time; time_data_to_file(par_name, n, dim, dim, total_time); cnt++; } // Initialise file to write execution time for serial output std::string ser_name = "serial_IP_time.dat"; std::fstream fs; fs.open(ser_name, std::fstream::out | std::fstream::trunc); fs.close(); // Start clock start_time = omp_get_wtime(); for (int n = 0; n < max_steps; n++) { // Calculate next generation in the game print_IMG(grid.cells, dim, dim, n); // Record time at time-step run_time = omp_get_wtime() - start_time; time_data_to_file(ser_name, n, dim, dim, run_time); } } } // This function produces execution time series when // for different sizes of serialised and parallelised // grids void size_analysis(int n_cores) { // Evaluate execution time for parallelised grids std::string par_name = "parallel_size_time.dat"; std::fstream fp; fp.open(par_name, std::fstream::out | std::fstream::trunc); fp.close(); std::vector<int> dims = {10, 100, 1000, 5000, 10000, 20000}; for (int dim : dims) { // Start clock start_time = 
omp_get_wtime(); // Create a random grid of different size for each iteration Grid grid = Grid(dim, dim, true, n_cores); // Calculate next generation in the game grid.do_iteration(false); // Record time at time-step run_time = omp_get_wtime() - start_time; time_data_to_file(par_name, 1, dim, dim, run_time); } // Evaluate execution time for serialised grids std::string ser_name = "serial_size_time.dat"; std::fstream fs; fs.open(ser_name, std::fstream::out | std::fstream::trunc); fs.close(); int single_core = 1; for (int dim : dims) { // Start clock start_time = omp_get_wtime(); // Generate random NxN grid Grid grid = Grid(dim, dim, false, single_core); // Calculate next generation in the game grid.do_iteration(false); // Record time at time-step run_time = omp_get_wtime() - start_time; time_data_to_file(ser_name, 1, dim, dim, run_time); } } // This function produces execution time series when // for different sizes of serialised and parallelised // grids void iterations_analysis(int dim, int n_cores, int max_steps) { // Evaluate execution time for parallelised grids std::string par_name = "parallel_its_time.dat"; std::fstream fp; fp.open(par_name, std::fstream::out | std::fstream::trunc); fp.close(); Grid grid = Grid(dim, dim, true, n_cores); // Start clock start_time = omp_get_wtime(); for (int n = 0; n < max_steps; n++) { // Calculate next generation in the game grid.do_iteration(false); // Record time at time-step run_time = omp_get_wtime() - start_time; time_data_to_file(par_name, n, dim, dim, run_time); } // Evaluate execution time for serialised grids std::string ser_name = "serial_its_time.dat"; std::fstream fs; fs.open(ser_name, std::fstream::out | std::fstream::trunc); fs.close(); int single_core = 1; Grid grid2 = Grid(dim, dim, false, n_cores); // Start clock start_time = omp_get_wtime(); for (int n = 0; n < max_steps; n++) { // Calculate next generation in the game grid2.do_iteration(false); // Record time at time-step run_time = omp_get_wtime() - 
start_time;
        time_data_to_file(ser_name, n, dim, dim, run_time);
    }
}

// Measures how the wall-clock time of constructing a random dim x dim grid
// plus one game-of-life iteration varies with the number of OpenMP cores.
// One row per core count is appended to "parallel_cores_time.dat".
void cores_analysis(int dim)
{
    // Truncate any previous results file for the parallel timings
    std::string par_name = "parallel_cores_time.dat";
    std::fstream fp;
    fp.open(par_name, std::fstream::out | std::fstream::trunc);
    fp.close();

    // Core counts swept over
    std::vector<int> cores = {1, 2, 4, 8, 12};
    for (int n_cores : cores)
    {
        // Start clock.  NOTE(review): the timing includes the random grid
        // construction as well as the iteration itself -- confirm intended.
        start_time = omp_get_wtime();

        // create a random grid
        Grid grid = Grid(dim, dim, true, n_cores);

        // Calculate next generation in the game
        grid.do_iteration(false);

        // Record time at time-step
        run_time = omp_get_wtime() - start_time;
        time_data_to_file(par_name, n_cores, dim, dim, run_time);
    }
}
Layer_Conv2D.h
/*
 *  Layers.h
 *  rl
 *
 *  Created by Guido Novati on 11.02.16.
 *  Copyright 2016 ETH Zurich. All rights reserved.
 *
 */
#pragma once
#include "Layers.h"

// 2D convolution layer implemented as one GEMM per pass.  The previous
// activation is expected to already hold an im2col-style expansion of the
// input (OpY*OpX patches of KnY*KnX*InC values each -- see the size assert
// in forward()).  All geometry is fixed at compile time.
template <int InX, int InY, int InC, // input image: x:width, y:height, c:color channels
          int KnX, int KnY, int KnC, // filter: x:width, y:height, c:color channels
          int OpX, int OpY           // output img: x:width, y:height, same color channels as KnC
          >
struct Conv2DLayer : public Layer
{
  // Allocates the trainable parameters: KnY*KnX*InC*KnC kernel weights
  // plus one bias per output channel.  Caller owns the returned object.
  Params *allocate_params() const override
  {
    // number of kernel parameters:
    // 2d kernel size * number of inp channels * number of out channels
    const int nParams = KnY * KnX * InC * KnC;
    const int nBiases = KnC;
    return new Params(nParams, nBiases);
  }

  Conv2DLayer(const int _ID) : Layer(OpX * OpY * KnC, _ID)
  {
    static_assert(InX > 0 && InY > 0 && InC > 0, "Invalid input");
    static_assert(KnX > 0 && KnY > 0 && KnC > 0, "Invalid kernel");
    static_assert(OpX > 0 && OpY > 0, "Invalid outpus");
    print();
  }

  // Prints the layer geometry (input patches, filter, and output shape).
  void print()
  {
    printf("(%d) Conv: In:[%d %d %d %d %d] F:[%d %d %d %d] Out:[%d %d %d]\n",
           ID, OpY, OpX, KnY, KnX, InC, KnY, KnX, InC, KnC, OpX, OpY, KnC);
  }

  // Forward pass: OUT = INP * W + bias.  Biases are broadcast into the
  // output first, then the GEMM accumulates on top of them (beta = 1).
  void forward(const std::vector<Activation *> &act,
               const std::vector<Params *> &param) const override
  {
    assert(act[ID]->layersSize == OpY * OpX * KnC);
    assert(act[ID - 1]->layersSize == OpY * OpX * KnY * KnX * InC);
    assert(param[ID]->nWeights == KnY * KnX * InC * KnC);
    assert(param[ID]->nBiases == KnC);
    const int batchSize = act[ID]->batchSize;
    const Real *const INP = act[ID - 1]->output;
    Real *const OUT = act[ID]->output;

    // reset layers' output with the bias
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < batchSize * OpY * OpX; ++i)
    {
      for (int j = 0; j < KnC; ++j)
      {
        OUT[i * KnC + j] = param[ID]->biases[j];
      }
    }

    // perform the forward step with gemm:
    // [batch*OpY*OpX x KnY*KnX*InC] * [KnY*KnX*InC x KnC] -> [batch*OpY*OpX x KnC]
    gemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
         batchSize * OpY * OpX, KnC, KnY * KnX * InC,
         1., INP, KnY * KnX * InC,
         param[ID]->weights, KnC,
         1., OUT, KnC);
  }

  // Backward pass: accumulates bias and weight gradients from dError/dOutput
  // and propagates the error back into the previous layer's dError_dOutput.
  void bckward(const std::vector<Activation *> &act,
               const std::vector<Params *> &param,
               const std::vector<Params *> &grad) const override
  {
    const int batchSize = act[ID]->batchSize;
    const Real *const dEdO = act[ID]->dError_dOutput;

    // BackProp to compute bias gradient: dError / dBias
    {
      Real *const grad_B = grad[ID]->biases;
      std::fill(grad_B, grad_B + KnC, 0.);
      // NOTE(review): the atomic add funnels every thread into the same KnC
      // slots; per-thread partial sums (or an OpenMP array reduction) would
      // avoid the contention.  Left as-is to preserve behavior.
      #pragma omp parallel for collapse(2)
      for (int i = 0; i < batchSize * OpY * OpX; ++i)
      {
        for (int j = 0; j < KnC; ++j)
        {
          #pragma omp atomic
          grad_B[j] += dEdO[i * KnC + j];
        }
      }
    }

    // BackProp to compute weight gradient: dError / dWeights
    // grad_W = INP^T * dEdO, overwriting the zero-filled buffer (beta = 0).
    {
      Real *const grad_W = grad[ID]->weights;
      std::fill(grad_W, grad_W + KnY * KnX * InC * KnC, 0);
      gemm(CblasRowMajor, CblasTrans, CblasNoTrans,
           KnY * KnX * InC, KnC, batchSize * OpY * OpX,
           1., act[ID - 1]->output, KnY * KnX * InC,
           dEdO, KnC,
           0., grad_W, KnC);
    }

    // BackProp to compute dEdO of prev layer: errinp = dEdO * W^T (beta = 0).
    {
      Real *const errinp = act[ID - 1]->dError_dOutput;
      std::fill(errinp, errinp + batchSize * OpY * OpX * KnY * KnX * InC, 0.);
      gemm(CblasRowMajor, CblasNoTrans, CblasTrans,
           batchSize * OpY * OpX, KnY * KnX * InC, KnC,
           1., dEdO, KnC,
           param[ID]->weights, KnC,
           0., errinp, KnY * KnX * InC);
    }
  }

  // Xavier (Glorot) uniform initialization: weights drawn from
  // U(-sqrt(6/(fan_in+fan_out)), +sqrt(...)); biases start at zero.
  void init(std::mt19937 &gen, const std::vector<Params *> &param) const override
  {
    // get pointers to layer's weights and bias
    Real *const W = param[ID]->weights, *const B = param[ID]->biases;
    // initialize weights with Xavier initialization
    const int nAdded = KnX * KnY * InC, nW = param[ID]->nWeights;
    const Real scale = std::sqrt(6.0 / (nAdded + KnC));
    std::uniform_real_distribution<Real> dis(-scale, scale);
    std::generate(W, W + nW, [&]() { return dis(gen); });
    std::fill(B, B + KnC, 0);
  }
};
noble_1962.c
#include "noble_1962.h"

// Reports the model's metadata: resting potential and number of ODEs (NEQ).
GET_CELL_MODEL_DATA(init_cell_model_data) {

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Loads the initial state vector [V, m, h, n] for one cell.
// The active values correspond to a steady state reached after pacing at
// BCL = 300 ms for 10 pulses; the commented alternatives are other protocols.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Normal
    //sv[0] = -75.5344986658f; // V millivolt
    //sv[1] = 0.060546727200f; // m dimensionless
    //sv[2] = 0.725900135500f; // h dimensionless
    //sv[3] = 0.470923970800f; // n dimensionless

    // BCL = 300ms | 10 pulses
    sv[0] = -81.1893;   // V millivolt
    sv[1] = 0.0443563;  // m dimensionless
    sv[2] = 0.851652;   // h dimensionless
    sv[3] = 0.58291;    // n dimensionless

    // BCL = 500ms | 30 pulses
    //sv[0] = -75.238;
    //sv[1] = 0.0615111;
    //sv[2] = 0.718401;
    //sv[3] = 0.467409;
}

// Advances every requested cell by num_steps explicit-Euler steps of size dt.
// cells_to_solve (if non-NULL) maps the i-th work item to its cell index;
// otherwise cells are solved in order.  Cells are independent, so the outer
// loop is parallelized; sv_id is private per thread.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One forward-Euler step for a single cell: sv <- sv + dt * f(sv).
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current);

    for(int i = 0; i < NEQ; i++)
        sv[i] = dt*rDY[i] + rY[i];
}

// Right-hand side of the Noble (1962) Purkinje-fibre model.
// Inputs: sv = [V (mV), m, h, n]; writes the four derivatives into rDY_.
// NOTE(review): the commented-out blocks are the same equations in the
// original unit system (scaled by 1e-3 / larger conductances); the active
// code uses millisecond-consistent constants -- kept for reference.
void RHS_cpu(const real *sv, real *rDY_, real stim_current) {

    //State variables
    const real V_old_ = sv[0];
    const real m_old_ = sv[1];
    const real h_old_ = sv[2];
    const real n_old_ = sv[3];

    //Parameters
    //const real Cm = 12.00000000000000000e+00f;          // (microF)
    //const real g_na_max = 400000.00000000000000000e+00f; // (microS)
    //const real E_na = 40.00000000000000000e+00f;         // (millivolt)
    //const real g_L = 75.00000000000000000e+00f;          // (microS)
    //const real E_L = -60.00000000000000000e+00f;         // (millivolt)
    const real Cm = 12.0;        // membrane capacitance (microF)
    const real g_na_max = 400.0; // max sodium conductance (microS)
    const real E_na = 40.0;      // sodium reversal potential (millivolt)
    const real g_L = 0.075;      // leak conductance (microS)
    const real E_L = -60.0;      // leak reversal potential (millivolt)

    real calc_I_stim = stim_current;

    // Algebraics
    //real g_na = pow(m_old_, 3.00000)*h_old_*g_na_max;
    //real alpha_m = ( 100.000*(- V_old_ - 48.0000))/(exp((- V_old_ - 48.0000)/15.0000) - 1.00000);
    //real alpha_h = 170.000*exp((- V_old_ - 90.0000)/20.0000);
    //real alpha_n = ( 0.100000*(- V_old_ - 50.0000))/(exp((- V_old_ - 50.0000)/10.0000) - 1.00000);
    //real i_na = (g_na+140.000)*(V_old_ - E_na);
    //real i_na_no_oscilation = (g_na+122.500)*(V_old_ - E_na);
    //real beta_m = ( 120.000*(V_old_+8.00000))/(exp((V_old_+8.00000)/5.00000) - 1.00000);
    //real beta_h = 1000.00/(1.00000+exp((- V_old_ - 42.0000)/10.0000));
    //real beta_n = 2.00000*exp((- V_old_ - 90.0000)/80.0000);
    //real g_K1 = 1200.00*exp((- V_old_ - 90.0000)/50.0000)+ 15.0000*exp((V_old_+90.0000)/60.0000);
    //real g_K2 = 1200.00*pow(n_old_, 4.00000);
    //real i_k = (g_K1+g_K2)*(V_old_+100.000);
    //real i_leak = g_L*(V_old_ - E_L);

    // Gate rate constants (alpha = opening, beta = closing) and currents.
    // NOTE(review): beta_m/beta_h/beta_n are declared double while the rest
    // use real -- harmless if real == double, but inconsistent; confirm.
    real g_na = pow(m_old_, 3.00000)*h_old_*g_na_max;
    real alpha_h = ((1.7e-01*exp((((-V_old_)-9.0e+01)/2.0e+01))));
    real alpha_m = (((1.0e-01*((-V_old_)-4.8e+01))/(exp((((-V_old_)-4.8e+01)/1.5e+01))-1.0e+00)));
    real alpha_n = (((1.0e-04*((-V_old_)-5.0e+01))/(exp((((-V_old_)-5.0e+01)/1.0e+01))-1.0e+00)));
    real i_na = (g_na+1.4e-01)*(V_old_ - E_na);
    // i_na_no_oscilation is only used by the commented-out rate below; it is
    // currently computed but unused.
    real i_na_no_oscilation = (g_na+1.2e-01)*(V_old_ - E_na);
    double beta_m = (((1.2e-01*(V_old_+8.0e+00))/(exp(((V_old_+8.0e+00)/5.0e+00))-1.0e+00)));
    double beta_h = ((1.0/(1.0e+00+exp((((-V_old_)-4.2e+01)/1.0e+01)))));
    double beta_n = ((2.0e-03*exp((((-V_old_)-9.0e+01)/8.0e+01))));
    real g_K1 = 1.2*exp((((-V_old_)-9.0e+01)/5.0e+01)) + (1.5e-02*exp(((V_old_+9.0e+01)/6.0e+01)));
    real g_K2 = 1.2*pow(n_old_,4.0e+00);
    real i_k = (g_K1+g_K2)*(V_old_+100.000);
    real i_leak = g_L*(V_old_ - E_L);

    // Rates
    //rDY_[0] = (- (i_na + i_k + i_leak + calc_I_stim)/Cm) * 1.0E-03;
    //rDY_[0] = (- (i_na_no_oscilation + i_k + i_leak + calc_I_stim)/Cm) * 1.0E-03;
    //rDY_[1] = (alpha_m*(1.00000 - m_old_) - beta_m*m_old_) * 1.0E-03;
    //rDY_[2] = (alpha_h*(1.00000 - h_old_) - beta_h*h_old_) * 1.0E-03;
    //rDY_[3] = (alpha_n*(1.00000 - n_old_) - beta_n*n_old_) * 1.0E-03;
    rDY_[0] = (- (i_na + i_k + i_leak + calc_I_stim)/Cm);   // dV/dt
    //rDY_[0] = (- (i_na_no_oscilation + i_k + i_leak + calc_I_stim)/Cm);
    rDY_[1] = (alpha_m*(1.00000 - m_old_) - beta_m*m_old_); // dm/dt
    rDY_[2] = (alpha_h*(1.00000 - h_old_) - beta_h*h_old_); // dh/dt
    rDY_[3] = (alpha_n*(1.00000 - n_old_) - beta_n*n_old_); // dn/dt
}
NeuralNetwork_OMP_CPU4.c
/* NEURAL NETWORK OMP CPU4.c * by Lut99 * * Created: * 4/18/2020, 11:25:46 PM * Last edited: * 19/11/2020, 17:18:45 * Auto updated? * Yes * * Description: * The NeuralNetwork class implements a matrix-based Feedforward Neural * Network which is hardcoded to use Mean Squared Error for cost function and * sigmoid as activation function. * * This file implements the fourth of eight different OpenMP-optimised * versions for the CPU. It optimises the innermost loops of the training * function using SIMD. **/ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <sys/time.h> #include "NeuralNetwork.h" #define WEIGHTS_MIN -3.0 #define WEIGHTS_MAX 3.0 #define BIAS_MIN -3.0 #define BIAS_MAX 3.0 /***** OPENMP DECLARATIONS *****/ extern int omp_set_num_threads(); extern int omp_get_num_procs(); extern int omp_get_thread_num(); /***** HELPER FUNCTIONS *****/ #define TIMEVAL_TO_MS(T_START, T_END) (((T_END.tv_sec - T_START.tv_sec) * 1000000 + (T_END.tv_usec - T_START.tv_usec)) / 1000000.0) extern size_t max(size_t length, const size_t* list); /***** NEURAL NETWORK OPERATIONS *****/ void nn_train(neural_net* nn, size_t n_samples, double** inputs, double** expected, double learning_rate, size_t n_iterations) { #ifdef BENCHMARK // Declare all timers struct timeval s_total, e_total, s_iters, e_iters, s_fwd, e_fwd, s_bck_out, e_bck_out, s_bck_hid, e_bck_hid, s_upd, e_upd; // Set some shortcuts for the timers size_t half_iters = n_iterations / 2; size_t half_samples = n_samples / 2; // Start the total timer gettimeofday(&s_total, NULL); #endif // Also obtain links to all biases / matrices double** biases = nn->biases; double** weights = nn->weights; // Make some shortcuts for the number-of-nodes information size_t n_layers = nn->n_layers; size_t n_weights = nn->n_weights; size_t* nodes_per_layer = nn->nodes_per_layer; // Initialize the temporary delta memory to the correct size double* deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer)); 
double* prev_deltas = malloc(sizeof(double) * max(n_layers, nodes_per_layer));
    // NOTE(review): 'max(n_layers, nodes_per_layer)' mixes a count with the
    // nodes_per_layer array — presumably 'max' is a project helper returning the
    // largest layer width; confirm against its definition. The buffer must hold
    // one delta per node of the widest layer.

    // Create a list that is used to store intermediate outputs. The first input layer (=first column)
    // is linked and not copied to the input data
    double* layer_outputs[n_layers];
    // Allocate arrays for the other layers
    for (size_t l = 1; l < n_layers; l++) {
        layer_outputs[l] = malloc(sizeof(double) * nodes_per_layer[l]);
    }

    // Create the delta_biases and delta_weights arrays / matrices
    // (one entry per weight layer; weights are stored row-major as
    // [prev_node][this_node], i.e. index prev_n * this_nodes + n)
    double* delta_biases[n_weights];
    double* delta_weights[n_weights];
    for(size_t l = 0; l < n_weights; l++) {
        delta_biases[l] = malloc(sizeof(double) * nodes_per_layer[l + 1]);
        delta_weights[l] = malloc(sizeof(double) * nodes_per_layer[l] * nodes_per_layer[l + 1]);

        // Fill with zeros
        for (size_t n = 0; n < nodes_per_layer[l + 1]; n++) {
            delta_biases[l][n] = 0;
            for (size_t prev_n = 0; prev_n < nodes_per_layer[l]; prev_n++) {
                delta_weights[l][prev_n * nodes_per_layer[l + 1] + n] = 0;
            }
        }
    }

    #ifdef BENCHMARK
    // Start the iterations timer
    gettimeofday(&s_iters, NULL);
    #endif

    // Perform the training for n_iterations (always)
    // Hoisted loop invariants for the output layer of the backward pass.
    size_t last_nodes = nodes_per_layer[n_layers - 1];
    size_t last_prev_nodes = nodes_per_layer[n_layers - 2];
    double* last_delta_bias = delta_biases[n_layers - 2];
    double* last_delta_weight = delta_weights[n_layers - 2];
    for (size_t i = 0; i < n_iterations; i++) {
        for (size_t s = 0; s < n_samples; s++) {

            /***** FORWARD PASS *****/

            #ifdef BENCHMARK
            // Start the forward pass timer
            if (i == half_iters && s == half_samples) { gettimeofday(&s_fwd, NULL); }
            #endif

            // Link the input to the first layer outputs (no copy; freed-loop below
            // deliberately skips layer_outputs[0] for this reason)
            layer_outputs[0] = inputs[s];

            // Iterate over each layer to feedforward through the network
            for (size_t l = 1; l < n_layers; l++) {
                // Get some references to the bias list, weight matrix and outputs of the previous and this layer
                double* bias = biases[l - 1];
                double* weight = weights[l - 1];
                double* prev_output = layer_outputs[l - 1];
                double* output = layer_outputs[l];

                // Compute the activation for each node on this layer
                size_t this_nodes = nodes_per_layer[l];
                size_t prev_nodes = nodes_per_layer[l - 1];
                for (size_t n = 0; n < this_nodes; n++) {
                    // Sum the weighted inputs for this node
                    double z = bias[n];
                    #pragma omp simd
                    for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
                        z += prev_output[prev_n] * weight[prev_n * this_nodes + n];
                    }

                    // Run the activation function (sigmoid) over this input and store it in the output
                    output[n] = 1 / (1 + exp(-z));
                }
            }

            #ifdef BENCHMARK
            // End the forward timer, start the backward pass output timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&e_fwd, NULL);
                gettimeofday(&s_bck_out, NULL);
            }
            #endif

            /***** BACKWARD PASS *****/
            // Implementation: https://towardsdatascience.com/simple-neural-network-implementation-in-c-663f51447547
            // Backpropagate the error from the last layer to the first.

            double* sample_expected = expected[s];

            // Do the output layer: compute the deltas
            // (delta = (target - out) * sigmoid'(z), with sigmoid' = out * (1 - out))
            double* output = layer_outputs[n_layers - 1];
            #pragma omp simd
            for (size_t n = 0; n < last_nodes; n++) {
                double output_val = output[n];
                prev_deltas[n] = (sample_expected[n] - output_val) * output_val * (1 - output_val);
            }

            // Do the output layer: compute the bias & weight updates

            // Add all deltas as delta_biases for this layer
            #pragma omp simd
            for (size_t n = 0; n < last_nodes; n++) {
                last_delta_bias[n] += prev_deltas[n];
            }

            // Same for all the weights, except we compute the delta_weights first
            double* last_prev_output = layer_outputs[n_layers - 2];
            for (size_t prev_n = 0; prev_n < last_prev_nodes; prev_n++) {
                #pragma omp simd
                for (size_t n = 0; n < last_nodes; n++) {
                    last_delta_weight[prev_n * last_nodes + n] += last_prev_output[prev_n] * prev_deltas[n];
                }
            }

            #ifdef BENCHMARK
            // End the backward pass output timer, start the backward pass hidden timer
            if (i == half_iters && s == half_samples) {
                gettimeofday(&e_bck_out, NULL);
                gettimeofday(&s_bck_hid, NULL);
            }
            #endif

            // Then, the rest of the hidden layers
            // NOTE(review): 'deltas' is allocated before this excerpt (it is freed in
            // the cleanup below); it ping-pongs with 'prev_deltas' via the swap at the
            // bottom of this loop, so 'prev_deltas' always holds the next layer's deltas.
            for (size_t l = n_layers - 2; l > 0; l--) {
                double* delta_bias = delta_biases[l - 1];
                double* delta_weight = delta_weights[l - 1];
                double* output = layer_outputs[l];
                double* prev_output = layer_outputs[l - 1];
                size_t next_nodes = nodes_per_layer[l + 1];
                size_t this_nodes = nodes_per_layer[l];
                size_t prev_nodes = nodes_per_layer[l - 1];

                // Loop through all nodes in this layer to compute their deltas by summing all deltas of the next layer in a weighted fashion
                double* weight_next = weights[l];
                for (size_t n = 0; n < this_nodes; n++) {
                    // Take the weighted sum of all connection of that node with this layer
                    double error = 0;
                    #pragma omp simd
                    for (size_t next_n = 0; next_n < next_nodes; next_n++) {
                        error += prev_deltas[next_n] * weight_next[n * next_nodes + next_n];
                    }

                    // Multiply the error with the derivative of the activation function to find the result
                    double output_val = output[n];
                    deltas[n] = error * output_val * (1 - output_val);
                }

                // Add all deltas as delta_biases for this layer
                #pragma omp simd
                for (size_t n = 0; n < this_nodes; n++) {
                    delta_bias[n] += deltas[n];
                }

                // Same for all the weights, except we compute the delta_weights first
                for (size_t prev_n = 0; prev_n < prev_nodes; prev_n++) {
                    #pragma omp simd
                    for (size_t n = 0; n < this_nodes; n++) {
                        delta_weight[prev_n * this_nodes + n] += prev_output[prev_n] * deltas[n];
                    }
                }

                // Swap the two deltas
                double* temp = deltas;
                deltas = prev_deltas;
                prev_deltas = temp;
            }

            #ifdef BENCHMARK
            // End the backward pass hidden timer
            if (i == half_iters && s == half_samples) { gettimeofday(&e_bck_hid, NULL); }
            #endif
        }

        #ifdef BENCHMARK
        // Start the updates timer
        if (i == half_iters) { gettimeofday(&s_upd, NULL); }
        #endif

        // Actually update the weights, and reset the delta updates to 0 for next iteration
        // (batch update: deltas were accumulated over all samples above)
        for (size_t l = 0; l < n_weights; l++) {
            double* bias = biases[l];
            double* delta_bias = delta_biases[l];
            double* weight = weights[l];
            double* delta_weight = delta_weights[l];

            // Update the biases & reset delta_biases
            size_t this_nodes = nodes_per_layer[l + 1];
            #pragma omp simd
            for (size_t n = 0; n < this_nodes; n++) {
                bias[n] += delta_bias[n] * learning_rate;
                delta_bias[n] = 0;
            }

            // Update the weights & reset delta_weights (flat iteration over the matrix)
            size_t prev_nodes = nodes_per_layer[l];
            #pragma omp simd
            for (size_t i = 0; i < this_nodes * prev_nodes; i++) {
                weight[i] += delta_weight[i] * learning_rate;
                delta_weight[i] = 0;
            }
        }

        #ifdef BENCHMARK
        // Stop the updates timer
        if (i == half_iters) { gettimeofday(&e_upd, NULL); }
        #endif
    }

    #ifdef BENCHMARK
    // End the iterations timer
    gettimeofday(&e_iters, NULL);
    #endif

    // Cleanup

    // Free the delta biases / weights
    for(size_t l = 0; l < n_weights; l++) {
        free(delta_biases[l]);
        free(delta_weights[l]);
    }

    // Free the layer_outputs (skip the first, as these merely link the input rather than copy 'em)
    for (size_t l = 1; l < n_layers; l++) {
        free(layer_outputs[l]);
    }

    // Cleanup the deltas
    free(deltas);
    free(prev_deltas);

    #ifdef BENCHMARK
    // End the total timer
    gettimeofday(&e_total, NULL);

    // Print the results (one duration per line, in ms)
    printf("%f\n", TIMEVAL_TO_MS(s_total, e_total));
    printf("%f\n", TIMEVAL_TO_MS(s_iters, e_iters));
    printf("%f\n", TIMEVAL_TO_MS(s_fwd, e_fwd));
    printf("%f\n", TIMEVAL_TO_MS(s_bck_out, e_bck_out));
    printf("%f\n", TIMEVAL_TO_MS(s_bck_hid, e_bck_hid));
    printf("%f\n", TIMEVAL_TO_MS(s_upd, e_upd));
    #endif
}


/***** OTHER TOOLS *****/

/* This variation takes no optional arguments; accept and ignore them. */
void parse_opt_args(int argc, char** argv) {
    // Just ignore the arguments
    (void) argc;
    (void) argv;
}

/* Report which implementation variation this binary was built with. */
void print_opt_args() {
    printf(" - Variation : OpenMP CPU 4 (SIMD only)\n");
}
graph_algo.h
#ifndef _ODPS_GRAPH_ALGO_ #define _ODPS_GRAPH_ALGO_ #include <memory> #include <stack> #include <vector> #include <iostream> #include <fstream> #include <map> #include <omp.h> #include "bitset.h" #include "graph.h" namespace apsara { namespace odps { namespace graph { namespace query { template <typename VType> class GraphAlgo { public: GraphAlgo(const std::shared_ptr<Graph<VType>> &g) { mG = g; size_t sizeBitSet = g->GetNumV(); mVisited = std::shared_ptr<BitSet>(new BitSet(sizeBitSet)); mOddFrontier = std::shared_ptr<BitSet>(new BitSet(sizeBitSet)); mEvenFrontier = std::shared_ptr<BitSet>(new BitSet(sizeBitSet)); } virtual ~GraphAlgo() {} void SetNumThreads(int numThreads) { mNumThreads = numThreads; mVisited->SetNumThreads(numThreads); mOddFrontier->SetNumThreads(numThreads); mEvenFrontier->SetNumThreads(numThreads); } size_t BFS(const VType &vQuery); void StronglyCC(const std::shared_ptr<Graph<VType>> &g, std::map<VType, std::vector<VType>> &sCC); void Visit(const VType &v, VType &index, std::vector<VType> &vIndex, std::vector<VType> &vLowLink, std::vector<bool> &vOnStack, std::stack<VType> &S, const std::shared_ptr<Graph<VType>> &g, std::map<VType, std::vector<VType>> &sCC); private: void TopDownStep(std::shared_ptr<BitSet> &first, std::shared_ptr<BitSet> &second); void BottomUpStep(std::shared_ptr<BitSet> &first, std::shared_ptr<BitSet> &second); size_t GetNumEdgesToCheck(const std::shared_ptr<BitSet> &set, bool testSet); private: int mNumThreads; std::shared_ptr<Graph<VType>> mG; std::shared_ptr<BitSet> mVisited; std::shared_ptr<BitSet> mOddFrontier; std::shared_ptr<BitSet> mEvenFrontier; }; template <typename VType> size_t GraphAlgo<VType>::BFS(const VType &vQuery) { int alpha = 12; mVisited->ReSet(); mOddFrontier->ReSet(); mEvenFrontier->ReSet(); mOddFrontier->Set(vQuery); int step = 0; while((step %2 == 0 && mOddFrontier->Any()) || (step %2 == 1 && mEvenFrontier->Any()) ) { if(step %2 == 0) { if(GetNumEdgesToCheck(mOddFrontier, true) * alpha < 
GetNumEdgesToCheck(mVisited, false)) { TopDownStep(mOddFrontier, mEvenFrontier); } else { BottomUpStep(mOddFrontier, mEvenFrontier); } } if(step %2 == 1) { if(GetNumEdgesToCheck(mEvenFrontier, true) * alpha < GetNumEdgesToCheck(mVisited, false)) { TopDownStep(mEvenFrontier, mOddFrontier); } else { BottomUpStep(mEvenFrontier, mOddFrontier); } } step++; } return mVisited->IsSet(vQuery) ? mVisited->Count() - 1 : mVisited->Count(); } template <typename VType> size_t GraphAlgo<VType>::GetNumEdgesToCheck(const std::shared_ptr<BitSet> &set, bool testSet) { size_t ret = 0; size_t i; #pragma omp parallel for num_threads(mNumThreads) private(i) reduction(+:ret) for (i=0; i<mG->GetNumV(); ++i) { if(testSet && set->IsSet(i)) { ret += mG->GetDegree(i); } else if(!testSet && !set->IsSet(i)) { ret += mG->GetRevDegree(i); } } return ret; } template <typename VType> void GraphAlgo<VType>::TopDownStep(std::shared_ptr<BitSet> &first, std::shared_ptr<BitSet> &second) { second->ReSet(); size_t i; #pragma omp parallel for num_threads(mNumThreads) private(i) for(i=0; i<first->GetBuckets(); i++) { if(first->Test(i)) { for(VType j=i*8; j<(i+1)*8; j++) { if(first->IsSet(j)) { size_t from, to; std::shared_ptr<VType> &edges = mG->GetEdges(j, from, to); for(size_t k=from; k<to; k++) { VType edge = edges.get()[k]; if(!mVisited->IsSet(edge)) { mVisited->Set(edge); second->Set(edge); } } } } } } } template <typename VType> void GraphAlgo<VType>::BottomUpStep(std::shared_ptr<BitSet> &first, std::shared_ptr<BitSet> &second) { second->ReSet(); VType i; #pragma omp parallel for num_threads(mNumThreads) private(i) for(i=0; i<mVisited->GetSize(); i++) { if(!mVisited->IsSet(i)) { size_t from, to; std::shared_ptr<VType> &edges = mG->GetRevEdges(i, from, to); for(size_t k=from; k<to; k++) { VType edge = edges.get()[k]; if(first->IsSet(edge)) { mVisited->Set(i); second->Set(i); break; } } } } } template <typename VType> void GraphAlgo<VType>::StronglyCC(const std::shared_ptr<Graph<VType>> &g, 
std::map<VType, std::vector<VType>> &sCC) { std::stack<VType> S; VType index; std::vector<VType> vIndex; std::vector<VType> vLowLink; std::vector<bool> vOnStack; std::vector<VType> vertices = g->GetAllVertices(); vIndex.resize(vertices.size()+1); vLowLink.resize(vertices.size()+1); vOnStack.resize(vertices.size()+1); size_t i; //#pragma omp parallel for num_threads(mNumThreads) private(i) for(i=0; i<vertices.size(); ++i) { vIndex[i] = std::numeric_limits<VType>::max(); vLowLink[i] = std::numeric_limits<VType>::max(); vOnStack[i] = false; } for(size_t i=0; i<vertices.size(); ++i) { if(vIndex[i] == std::numeric_limits<VType>::max()) Visit(i, index, vIndex, vLowLink, vOnStack, S, g, sCC); } std::cout<<"Number of vertices: "<<vertices.size()<<" number of strongly CC: "<<sCC.size()<<std::endl; } template <typename VType> void GraphAlgo<VType>::Visit(const VType &v, VType &index, std::vector<VType> &vIndex, std::vector<VType> &vLowLink, std::vector<bool> &vOnStack, std::stack<VType> &S, const std::shared_ptr<Graph<VType>> &g, std::map<VType, std::vector<VType>> &sCC) { vIndex[v] = index; vLowLink[v] = index; index++; S.push(v); vOnStack[v] = true; size_t from, to; std::shared_ptr<VType> edges = g->GetEdges(v, from, to); for(size_t j=from; j<to; j++) { VType w = edges.get()[j]; if(vIndex[w] == std::numeric_limits<VType>::max()) { Visit(w, index, vIndex, vLowLink, vOnStack, S, g, sCC); vLowLink[v] = std::min(vLowLink[v], vLowLink[w]); } else { vLowLink[v] = std::min(vLowLink[v], vIndex[w]); } } if(vLowLink[v] == vIndex[v]) { VType w=v; while(!S.empty()) { w = S.top(); S.pop(); vOnStack[w] = false; sCC[v].push_back(w); if(v == w) break; } } } } // namespace query } // namespace graph } // namespace odps } // namespace apsara #endif
sycl_qdp_vutils.h
/*
 * sycl_qdp_utils.h
 *
 *  Created on: May 23, 2017
 *      Author: bjoo
 *
 * Conversions between QDP++ lattice objects (fermions, half fermions, gauge
 * fields) and the SyCL vectorized checkerboarded containers. Each routine maps
 * a coarse (vectorized) site index plus a SIMD lane back to a fine global site
 * via LayoutLeft coordinates, then copies element-by-element through the QDP++
 * site table of the matching checkerboard subset.
 */
#pragma once
#include "qdp.h"
#include "dslash/sycl_vtypes.h"
#include <CL/sycl.hpp>
#include <utils/print_utils.h>
#include <lattice/constants.h>
#include <lattice/lattice_info.h>
#include "dslash/dslash_defaults.h"
#include "dslash/dslash_vectype_sycl.h"
#include "dslash/dslash_complex.h"
#include "dslash/sycl_vtypes.h"
//#include "lattice/geometry_utils.h"
namespace MG
{

// Single QDP++ Vector
// Copy a QDP++ 4-spinor lattice fermion into a vectorized SyCL CB spinor.
template<typename T, typename VN, typename LF>
void QDPLatticeFermionToSyCLCBVSpinor(const LF& qdp_in,
                                      SyCLCBFineVSpinor<MGComplex<T>,VN,4>& sycl_out)
{
    auto cb = sycl_out.GetCB();
    // Select the QDP++ checkerboard subset matching the target's checkerboard.
    const QDP::Subset& sub = ( cb == EVEN ) ? QDP::rb[0] : QDP::rb[1];

    // Check conformance:
    IndexType num_gsites=static_cast<IndexType>(sycl_out.GetGlobalInfo().GetNumCBSites());
    if ( sub.numSiteTable() != num_gsites ) {
        MasterLog(ERROR, "%s QDP++ Spinor has different number of sites per checkerboard than the KokkosCBFineSpinor", __FUNCTION__);
    }
    IndexType num_sites = static_cast<IndexType>(sycl_out.GetInfo().GetNumCBSites());
    if ( num_sites * VN::VecLen != num_gsites ) {
        MasterLog(ERROR, "%s Veclen of Vector type x num_coarse_sites != num_fine_sites", __FUNCTION__);
    }

    auto h_out = sycl_out.GetData().template get_access<cl::sycl::access::mode::write>();
    IndexArray coarse_dims = sycl_out.GetInfo().GetCBLatticeDimensions();
    IndexArray fine_dims = sycl_out.GetGlobalInfo().GetCBLatticeDimensions();

#pragma omp parallel for
    for(size_t i=0; i < num_sites; ++i) {
        IndexArray c_coords = LayoutLeft::coords(i,coarse_dims);
        for(IndexType color=0; color < 3; ++color) {
            for(IndexType spin=0; spin < 4; ++spin) {
                for(IndexType lane =0; lane < VN::VecLen; ++lane) {
                    // Fine site = coarse coords offset by the lane's position
                    // within the vector tile (Dim0..Dim3).
                    IndexArray p_coords = LayoutLeft::coords(lane,{VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3});
                    IndexArray g_coords;
                    for(IndexType mu=0; mu < 4; ++mu) {
                        g_coords[mu] = c_coords[mu] + p_coords[mu]*coarse_dims[mu];
                    }
                    IndexType g_idx = LayoutLeft::index(g_coords, fine_dims);
                    IndexType qdp_index = sub.siteTable()[g_idx];
                    LaneOps<T,VN::VecLen>::insert(h_out(i,spin,color),
                        MGComplex<T>(qdp_in.elem(qdp_index).elem(spin).elem(color).real(),
                                     qdp_in.elem(qdp_index).elem(spin).elem(color).imag()),
                        lane);
                }//lane
            } // spin
        } // color
    }
}

// Single QDP++ Vector
// Copy a QDP++ half fermion (2 spins) into a vectorized SyCL CB spinor.
template<typename T, typename VN, typename HF>
void QDPLatticeHalfFermionToSyCLCBVSpinor2(const HF& qdp_in,
                                           SyCLCBFineVSpinor<MGComplex<T>,VN,2>& sycl_out)
{
    auto cb = sycl_out.GetCB();
    const QDP::Subset& sub = ( cb == EVEN ) ? QDP::rb[0] : QDP::rb[1];

    // Check conformance:
    IndexType num_gsites=static_cast<IndexType>(sycl_out.GetGlobalInfo().GetNumCBSites());
    if ( sub.numSiteTable() != num_gsites ) {
        MasterLog(ERROR, "%s QDP++ Spinor has different number of sites per checkerboard than the KokkosCBFineSpinor", __FUNCTION__);
    }
    IndexType num_sites = static_cast<IndexType>(sycl_out.GetInfo().GetNumCBSites());
    if ( num_sites * VN::VecLen != num_gsites ) {
        MasterLog(ERROR, "%s Veclen of Vector type x num_coarse_sites != num_fine_sites", __FUNCTION__);
    }

    auto h_out = sycl_out.GetData().template get_access<cl::sycl::access::mode::write>();
    IndexArray coarse_dims = sycl_out.GetInfo().GetCBLatticeDimensions();
    IndexArray fine_dims = sycl_out.GetGlobalInfo().GetCBLatticeDimensions();

#pragma omp parallel for
    for(size_t i=0; i < num_sites; ++i) {
        IndexArray c_coords = LayoutLeft::coords(i, coarse_dims);
        for(IndexType color=0; color < 3; ++color) {
            for(IndexType spin=0; spin < 2; ++spin) {
                for(IndexType lane =0; lane < VN::VecLen; ++lane) {
                    IndexArray p_coords = LayoutLeft::coords(lane,{VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3} );
                    IndexArray g_coords;
                    for(IndexType mu=0; mu < 4; ++mu) {
                        g_coords[mu] = c_coords[mu] + p_coords[mu]*coarse_dims[mu];
                    }
                    IndexType g_idx = LayoutLeft::index(g_coords, fine_dims);
                    IndexType qdp_index = sub.siteTable()[g_idx];
                    LaneOps<T,VN::VecLen>::insert(h_out(i,spin,color),
                        MGComplex<T>(qdp_in.elem(qdp_index).elem(spin).elem(color).real(),
                                     qdp_in.elem(qdp_index).elem(spin).elem(color).imag()),
                        lane);
                }//lane
            } // spin
        } // color
    }
}

// Single QDP++ vector
// Inverse of QDPLatticeFermionToSyCLCBVSpinor: vectorized SyCL spinor -> QDP++.
template<typename T, typename VN, typename LF>
void SyCLCBVSpinorToQDPLatticeFermion(const SyCLCBFineVSpinor<MGComplex<T>,VN, 4>& sycl_in,
                                      LF& qdp_out)
{
    auto cb = sycl_in.GetCB();
    const QDP::Subset& sub = ( cb == EVEN ) ? QDP::rb[0] : QDP::rb[1];

    // Check conformance:
    IndexType num_csites=static_cast<IndexType>(sycl_in.GetInfo().GetNumCBSites());
    IndexType num_gsites=static_cast<IndexType>(sycl_in.GetGlobalInfo().GetNumCBSites());
    if ( sub.numSiteTable() != num_gsites ) {
        MasterLog(ERROR, "%s: QDP++ Spinor has different number of sites per checkerboard than the KokkosCBFineSpinor", __FUNCTION__);
    }
    if( num_csites * VN::VecLen != num_gsites ) {
        MasterLog(ERROR, "%s: num_csites * veclen != num_gsites", __FUNCTION__);
    }

    typename SyCLCBFineVSpinor<MGComplex<T>,VN, 4>::DataType h_in_view = sycl_in.GetData();
    auto h_in = h_in_view.template get_access<cl::sycl::access::mode::read>();
    IndexArray c_dims = sycl_in.GetInfo().GetCBLatticeDimensions();
    IndexArray g_dims = sycl_in.GetGlobalInfo().GetCBLatticeDimensions();

    // NOTE(review): this selector/queue pair appears unused in this function —
    // possibly leftover scaffolding; confirm before removing.
    cl::sycl::cpu_selector cpu;
    cl::sycl::queue q(cpu);

#pragma omp parallel for
    for(int i=0; i < num_csites; ++i ) {
        IndexArray c_coords = LayoutLeft::coords(i,c_dims);
        for(IndexType color=0; color < 3; ++color) {
            for(IndexType spin=0; spin < 4; ++spin) {
                for(IndexType lane=0; lane < VN::VecLen;++lane) {
                    IndexArray p_coords=LayoutLeft::coords(lane,{VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3});
                    IndexArray g_coords;
                    for(IndexType mu=0; mu < 4; ++mu ) {
                        g_coords[mu] = c_coords[mu] + p_coords[mu]*c_dims[mu];
                    }
                    IndexType g_index=LayoutLeft::index(g_coords,g_dims);
                    // FIXME. IS the site table available or do I need to wrap it?
                    // This is explicitly on the CPU
                    IndexType qdp_index = sub.siteTable()[g_index];
                    MGComplex<T> from = LaneOps<T,VN::VecLen>::extract(h_in(i,spin,color), lane);
                    qdp_out.elem(qdp_index).elem(spin).elem(color).real() = from.real();
                    qdp_out.elem(qdp_index).elem(spin).elem(color).imag() =from.imag();
                } // lane
            } // spin
        } // color
    }
}

// Single QDP++ vector
// Inverse of QDPLatticeHalfFermionToSyCLCBVSpinor2: SyCL 2-spin spinor -> QDP++.
template<typename T, typename VN, typename HF>
void SyCLCBVSpinor2ToQDPLatticeHalfFermion(const SyCLCBFineVSpinor<MGComplex<T>,VN, 2>& sycl_in,
                                           HF& qdp_out)
{
    auto cb = sycl_in.GetCB();
    const QDP::Subset& sub = ( cb == EVEN ) ? QDP::rb[0] : QDP::rb[1];

    // Check conformance:
    IndexType num_csites=static_cast<IndexType>(sycl_in.GetInfo().GetNumCBSites());
    IndexType num_gsites=static_cast<IndexType>(sycl_in.GetGlobalInfo().GetNumCBSites());
    if ( sub.numSiteTable() != num_gsites ) {
        MasterLog(ERROR, "%s: QDP++ Spinor has different number of sites per checkerboard than the KokkosCBFineSpinor", __FUNCTION__);
    }
    if( num_csites * VN::VecLen != num_gsites ) {
        MasterLog(ERROR, "%s: num_csites * veclen != num_gsites", __FUNCTION__);
    }

    auto h_in = sycl_in.GetData().template get_access<cl::sycl::access::mode::read>();
    IndexArray c_dims = sycl_in.GetInfo().GetCBLatticeDimensions();
    IndexArray g_dims = sycl_in.GetGlobalInfo().GetCBLatticeDimensions();

#pragma omp parallel for
    for(int i=0; i< num_csites;++i) {
        IndexArray c_coords=LayoutLeft::coords(i,c_dims);
        for(IndexType color=0; color < 3; ++color) {
            for(IndexType spin=0; spin < 2; ++spin) {
                for(IndexType lane=0; lane < VN::VecLen;++lane) {
                    IndexArray p_coords = LayoutLeft::coords(lane,{VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3});
                    IndexArray g_coords;
                    for(IndexType mu=0; mu < 4; ++mu ) {
                        g_coords[mu] = c_coords[mu] + p_coords[mu]*c_dims[mu];
                    }
                    IndexType g_index=LayoutLeft::index(g_coords,g_dims);
                    IndexType qdp_index = sub.siteTable()[g_index];
                    MGComplex<T> v = LaneOps<T,VN::VecLen>::extract(h_in(i,spin,color),lane);
                    qdp_out.elem(qdp_index).elem(spin).elem(color).real() = v.real();
                    qdp_out.elem(qdp_index).elem(spin).elem(color).imag() = v.imag();
                } // lane
            } // spin
        } // color
    }
}

// Copy one checkerboard of a QDP++ gauge field (4 directions, 3x3 color matrix
// per site) into a vectorized SyCL CB gauge field.
template<typename T, typename VN, typename GF>
void QDPGaugeFieldToSyCLCBVGaugeField(const GF& qdp_in,
                                      SyCLCBFineVGaugeField<T,VN>& sycl_out)
{
    auto cb = sycl_out.GetCB();
    const QDP::Subset& sub = ( cb == EVEN ) ? QDP::rb[0] : QDP::rb[1];
    using FType = typename BaseType<T>::Type;

    // Check conformance:
    IndexType num_gsites=static_cast<IndexType>(sycl_out.GetGlobalInfo().GetNumCBSites());
    if ( sub.numSiteTable() != num_gsites ) {
        MasterLog(ERROR, "%s QDP++ Gauge has different number of sites per checkerboard than the KokkosCBFineVGaugeField", __FUNCTION__);
    }
    IndexType num_sites = static_cast<IndexType>(sycl_out.GetInfo().GetNumCBSites());
    if ( num_sites * VN::VecLen != num_gsites ) {
        MasterLog(ERROR, "%s Veclen of Vector type x num_coarse_sites != num_fine_sites", __FUNCTION__);
    }

    auto h_out = sycl_out.GetData().template get_access<cl::sycl::access::mode::write>();
    IndexArray coarse_dims = sycl_out.GetInfo().GetCBLatticeDimensions();
    IndexArray fine_dims = sycl_out.GetGlobalInfo().GetCBLatticeDimensions();

    // NOTE(review): this selector/queue pair appears unused in this function —
    // possibly leftover scaffolding; confirm before removing.
    cl::sycl::cpu_selector cpu;
    cl::sycl::queue q(cpu);

#pragma omp parallel for
    for(size_t i=0; i < num_sites; ++i) {
        IndexArray c_coords = LayoutLeft::coords(i, coarse_dims);
        for(IndexType dir=0; dir < 4; ++dir) {
            for(IndexType color=0; color < 3; ++color) {
                for(IndexType color2=0; color2 < 3; ++color2) {
                    for(IndexType lane =0; lane < VN::VecLen; ++lane) {
                        IndexArray p_coords=LayoutLeft::coords(lane,{VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3});
                        IndexArray g_coords;
                        for(IndexType mu=0; mu < 4; ++mu) {
                            g_coords[mu] = c_coords[mu] + p_coords[mu]*coarse_dims[mu];
                        }
                        IndexType g_idx = LayoutLeft::index(g_coords, fine_dims);
                        IndexType qdp_index = sub.siteTable()[g_idx];
                        LaneOps<FType,VN::VecLen>::insert( h_out(i,dir,color,color2),
                            MGComplex<FType>(qdp_in[dir].elem(qdp_index).elem().elem(color,color2).real(),
                                             qdp_in[dir].elem(qdp_index).elem().elem(color,color2).imag()),
                            lane);
                    }//lane
                } // color2
            } // color
        } // dir
    }
}

// Inverse of QDPGaugeFieldToSyCLCBVGaugeField: one checkerboard back to QDP++.
template<typename T, typename VN, typename GF>
void SyCLCBVGaugeFieldToQDPGaugeField(const SyCLCBFineVGaugeField<MGComplex<T>,VN>& sycl_in,
                                      GF& qdp_out)
{
    auto cb = sycl_in.GetCB();
    const QDP::Subset& sub = ( cb == EVEN ) ? QDP::rb[0] : QDP::rb[1];

    // Check conformance:
    IndexType num_csites=static_cast<IndexType>(sycl_in.GetInfo().GetNumCBSites());
    IndexType num_gsites=static_cast<IndexType>(sycl_in.GetGlobalInfo().GetNumCBSites());
    if ( sub.numSiteTable() != num_gsites ) {
        MasterLog(ERROR, "%s: QDP++ Spinor has different number of sites per checkerboard than the KokkosCBFineSpinor", __FUNCTION__);
    }
    if( num_csites * VN::VecLen != num_gsites ) {
        MasterLog(ERROR, "%s: num_csites * veclen != num_gsites", __FUNCTION__);
    }

    typename SyCLCBFineVGaugeField<MGComplex<T>,VN>::DataType h_in_view = sycl_in.GetData();
    auto h_in = h_in_view.template get_access<cl::sycl::access::mode::read>();
    IndexArray c_dims = sycl_in.GetInfo().GetCBLatticeDimensions();
    IndexArray g_dims = sycl_in.GetGlobalInfo().GetCBLatticeDimensions();

#pragma omp parallel for
    for(size_t i=0; i < num_csites; ++i ) {
        IndexArray c_coords = LayoutLeft::coords(i,c_dims);
        for(IndexType dir=0; dir < 4; ++dir) {
            for(IndexType color=0; color < 3; ++color) {
                for(IndexType color2=0; color2 < 3; ++color2) {
                    for(IndexType lane=0; lane < VN::VecLen;++lane) {
                        IndexArray p_coords = LayoutLeft::coords(lane,{VN::Dim0, VN::Dim1, VN::Dim2, VN::Dim3});
                        IndexArray g_coords;
                        for(IndexType mu=0; mu < 4; ++mu ) {
                            g_coords[mu] = c_coords[mu] + p_coords[mu]*c_dims[mu];
                        }
                        IndexType g_index=LayoutLeft::index(g_coords,g_dims);
                        IndexType qdp_index = sub.siteTable()[g_index];
                        MGComplex<T> v = LaneOps<T,VN::VecLen>::extract(h_in(i,dir,color,color2),lane );
                        qdp_out[dir].elem(qdp_index).elem().elem(color,color2).real() = v.real();
                        qdp_out[dir].elem(qdp_index).elem().elem(color,color2).imag() = v.imag();
                    } // lane
                } // color2
            } // color
        }// mu
    }
}

// Convert a full (both-checkerboard) QDP++ gauge field into the vectorized form.
template<typename T, typename VN, typename GF>
void QDPGaugeFieldToSyCLVGaugeField(const GF& qdp_in,
                                    SyCLFineVGaugeField<T,VN>& sycl_out)
{
    QDPGaugeFieldToSyCLCBVGaugeField<T,VN,GF>( qdp_in, sycl_out(EVEN));
    QDPGaugeFieldToSyCLCBVGaugeField<T,VN,GF>( qdp_in, sycl_out(ODD));
}

// Convert a full vectorized gauge field back to QDP++ (both checkerboards).
template<typename T, typename VN, typename GF>
void SyCLVGaugeFieldToQDPGaugeField(const SyCLFineVGaugeField<T,VN>& sycl_in,
                                    GF& qdp_out)
{
    SyCLCBVGaugeFieldToQDPGaugeField( sycl_in(EVEN),qdp_out);
    SyCLCBVGaugeFieldToQDPGaugeField( sycl_in(ODD), qdp_out);
}

} // namespace
convolutiondepthwise_3x3_pack8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); __m256 _bias0 = bias ? 
_mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00 = _mm256_loadu_ps(k0); __m256 _k01 = _mm256_loadu_ps(k0 + 8); __m256 _k02 = _mm256_loadu_ps(k0 + 16); __m256 _k10 = _mm256_loadu_ps(k0 + 24); __m256 _k11 = _mm256_loadu_ps(k0 + 32); __m256 _k12 = _mm256_loadu_ps(k0 + 40); __m256 _k20 = _mm256_loadu_ps(k0 + 48); __m256 _k21 = _mm256_loadu_ps(k0 + 56); __m256 _k22 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1); _sum1 = 
_mm256_comp_fmadd_ps(_k20, _r21, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1); __m256 _sum2 = _bias0; __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2); __m256 _sum3 = _bias0; __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r15 = _mm256_loadu_ps(r1 + 40); __m256 _r25 = _mm256_loadu_ps(r2 + 40); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3); __m256 _sum4 = _bias0; __m256 _r06 = _mm256_loadu_ps(r0 + 48); __m256 _r16 = _mm256_loadu_ps(r1 + 48); __m256 _r26 = _mm256_loadu_ps(r2 + 48); _mm256_storeu_ps(outptr0 + 24, _sum3); _sum4 = _mm256_comp_fmadd_ps(_k00, _r04, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k01, _r05, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k02, _r06, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k10, _r14, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k11, _r15, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k12, _r16, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k20, _r24, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k21, _r25, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k22, _r26, 
_sum4); __m256 _sum5 = _bias0; __m256 _r07 = _mm256_loadu_ps(r0 + 56); __m256 _r17 = _mm256_loadu_ps(r1 + 56); __m256 _r27 = _mm256_loadu_ps(r2 + 56); _mm256_storeu_ps(outptr0 + 32, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k00, _r05, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k01, _r06, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k02, _r07, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k10, _r15, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k11, _r16, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k12, _r17, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k20, _r25, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k21, _r26, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k22, _r27, _sum5); __m256 _sum6 = _bias0; __m256 _r08 = _mm256_loadu_ps(r0 + 64); __m256 _r18 = _mm256_loadu_ps(r1 + 64); __m256 _r28 = _mm256_loadu_ps(r2 + 64); _mm256_storeu_ps(outptr0 + 40, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k00, _r06, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k01, _r07, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k02, _r08, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k10, _r16, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k11, _r17, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k12, _r18, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k20, _r26, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k21, _r27, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k22, _r28, _sum6); __m256 _sum7 = _bias0; __m256 _r09 = _mm256_loadu_ps(r0 + 72); __m256 _r19 = _mm256_loadu_ps(r1 + 72); __m256 _r29 = _mm256_loadu_ps(r2 + 72); _mm256_storeu_ps(outptr0 + 48, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k00, _r07, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k01, _r08, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k02, _r09, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k10, _r17, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k11, _r18, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k12, _r19, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k20, _r27, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k21, _r28, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k22, _r29, _sum7); _mm256_storeu_ps(outptr0 + 56, _sum7); r0 += 64; r1 += 64; r2 += 64; outptr0 += 64; } for (; j + 3 < outw; j += 4) { 
__m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1); __m256 _sum2 = _bias0; __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2); 
_sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2); __m256 _sum3 = _bias0; __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r15 = _mm256_loadu_ps(r1 + 40); __m256 _r25 = _mm256_loadu_ps(r2 + 40); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3); _mm256_storeu_ps(outptr0 + 24, _sum3); r0 += 32; r1 += 32; r2 += 32; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1); _sum1 = 
_mm256_comp_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1);
_mm256_storeu_ps(outptr0 + 8, _sum1);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 16;
}
// remainder: one output pixel (8 floats) at a time
for (; j < outw; j++)
{
    __m256 _sum0 = _bias0;
    __m256 _r00 = _mm256_loadu_ps(r0);
    __m256 _r01 = _mm256_loadu_ps(r0 + 8);
    __m256 _r02 = _mm256_loadu_ps(r0 + 16);
    __m256 _r10 = _mm256_loadu_ps(r1);
    __m256 _r11 = _mm256_loadu_ps(r1 + 8);
    __m256 _r12 = _mm256_loadu_ps(r1 + 16);
    __m256 _r20 = _mm256_loadu_ps(r2);
    __m256 _r21 = _mm256_loadu_ps(r2 + 8);
    __m256 _r22 = _mm256_loadu_ps(r2 + 16);
    _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
    _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
    _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
    _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
    _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
    _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
    _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
    _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
    _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
    _mm256_storeu_ps(outptr0, _sum0);
    r0 += 8;
    r1 += 8;
    r2 += 8;
    outptr0 += 8;
}
// skip the 2 right-border input pixels (2 pixels * 8 floats) of this row
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
}
}
}

// Depthwise 3x3 convolution, stride 2, for 8-element-packed float channels
// (pack8, AVX). One kernel per channel ("group" == channel count); each
// output pixel is the sum of the 9 taps times the 3x3 input window, starting
// from an optional per-channel bias. _mm256_comp_fmadd_ps is presumably an
// ncnn helper that maps to FMA or mul+add depending on the build — TODO
// confirm against the ncnn x86 utility headers.
static void convdw3x3s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    // stride 2 consumes 2*outw input pixels per row; tailstep jumps the
    // remaining pixels plus one full extra row (each pixel = 8 floats)
    const int tailstep = (w - 2 * outw + w) * 8;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        // per-channel bias broadcast (zero when no bias blob supplied)
        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
        const float* k0 = kernel.row(g);
        float* outptr0 = out.row(0);
        const Mat img0 = bottom_blob.channel(g);
        // three consecutive input rows feeding one output row
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        // load the nine 8-wide kernel taps once per channel
        __m256 _k00 = _mm256_loadu_ps(k0);
        __m256 _k01 = _mm256_loadu_ps(k0 + 8);
        __m256 _k02 = _mm256_loadu_ps(k0 + 16);
        __m256 _k10 = _mm256_loadu_ps(k0 + 24);
        __m256 _k11 = _mm256_loadu_ps(k0 + 32);
        __m256 _k12 = _mm256_loadu_ps(k0 + 40);
        __m256 _k20 = _mm256_loadu_ps(k0 + 48);
        __m256 _k21 = _mm256_loadu_ps(k0 + 56);
        __m256 _k22 = _mm256_loadu_ps(k0 + 64);
        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // main loop: 4 output pixels per iteration; stride 2 means
            // consecutive outputs read input windows 16 floats apart
            for (; j + 3 < outw; j += 4)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
                // pixel 1: window starts 2 input pixels (16 floats) later,
                // reusing _r*2 as its left column
                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1);
                // pixel 2
                __m256 _sum2 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                __m256 _r06 = _mm256_loadu_ps(r0 + 48);
                __m256 _r16 = _mm256_loadu_ps(r1 + 48);
                __m256 _r26 = _mm256_loadu_ps(r2 + 48);
                _mm256_storeu_ps(outptr0 + 8, _sum1);
                _sum2 = _mm256_comp_fmadd_ps(_k00, _r04, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k01, _r05, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k02, _r06, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k10, _r14, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k11, _r15, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k12, _r16, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k20, _r24, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k21, _r25, _sum2);
                _sum2 = _mm256_comp_fmadd_ps(_k22, _r26, _sum2);
                // pixel 3
                __m256 _sum3 = _bias0;
                __m256 _r07 = _mm256_loadu_ps(r0 + 56);
                __m256 _r17 = _mm256_loadu_ps(r1 + 56);
                __m256 _r27 = _mm256_loadu_ps(r2 + 56);
                __m256 _r08 = _mm256_loadu_ps(r0 + 64);
                __m256 _r18 = _mm256_loadu_ps(r1 + 64);
                __m256 _r28 = _mm256_loadu_ps(r2 + 64);
                _mm256_storeu_ps(outptr0 + 16, _sum2);
                _sum3 = _mm256_comp_fmadd_ps(_k00, _r06, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k01, _r07, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k02, _r08, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k10, _r16, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k11, _r17, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k12, _r18, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k20, _r26, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k21, _r27, _sum3);
                _sum3 = _mm256_comp_fmadd_ps(_k22, _r28, _sum3);
                _mm256_storeu_ps(outptr0 + 24, _sum3);
                // stride 2: advance input by 2 pixels per output pixel
                r0 += 2 * 32;
                r1 += 2 * 32;
                r2 += 2 * 32;
                outptr0 += 32;
            }
            // 2 output pixels per iteration
            for (; j + 1 < outw; j += 2)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1);
                _mm256_storeu_ps(outptr0 + 8, _sum1);
                r0 += 2 * 16;
                r1 += 2 * 16;
                r2 += 2 * 16;
                outptr0 += 16;
            }
            // remainder: single output pixel
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);
                _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0);
                _mm256_storeu_ps(outptr0, _sum0);
                r0 += 2 * 8;
                r1 += 2 * 8;
                r2 += 2 * 8;
                outptr0 += 8;
            }
            // jump to the start of the next input row pair (see tailstep)
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
workspace.h
#ifndef Workspace_H #define Workspace_H #include "logger.h" #include "matrix.h" #include "Printer.h" namespace puma { class Workspace { public: // --- Start Constructors --- // Workspace(long x, long y, long z, short val, double voxelLength) { log = new puma::Logger(); matrix.resize(x,y,z,val); log->emptyLog(); this->voxelLength = voxelLength; printer = new puma::Printer(); } Workspace(long x, long y, long z, double voxelLength) { log = new puma::Logger(); matrix.resize(x,y,z,0); log->emptyLog(); this->voxelLength = voxelLength; printer = new puma::Printer(); } explicit Workspace(double voxelLength) { log = new puma::Logger(); matrix.resize(0,0,0,0); log->emptyLog(); this->voxelLength = voxelLength; printer = new puma::Printer(); } Workspace() { log = new puma::Logger(); matrix.resize(0,0,0,0); log->emptyLog(); this->voxelLength = 1e-6; printer = new puma::Printer(); } explicit Workspace(Workspace *other) { log = new puma::Logger(); matrix.copy(&other->matrix); log->emptyLog(); this->voxelLength = 1e-6; printer = new puma::Printer(); } explicit Workspace(const puma::Vec3<long>& shape) { log = new puma::Logger(); matrix.resize(shape.x,shape.y,shape.z,0); log->emptyLog(); this->voxelLength = 1e-6; printer = new puma::Printer(); } Workspace(long x, long y, long z, short val, double voxelLength, Logger *otherLog) { log = otherLog; matrix.resize(x,y,z,val); this->voxelLength = voxelLength; myLogger = false; printer = new puma::Printer(); } Workspace(long x, long y, long z, double voxelLength, Logger *otherLog) { log = otherLog; matrix.resize(x,y,z,0); this->voxelLength = voxelLength; myLogger = false; printer = new puma::Printer(); } Workspace(double voxelLength, Logger *otherLog) { log = otherLog; matrix.resize(0,0,0,0); this->voxelLength = voxelLength; myLogger = false; printer = new puma::Printer(); } explicit Workspace(Logger *otherLog) { log = otherLog; matrix.resize(0,0,0,0); this->voxelLength = 1e-6; myLogger = false; printer = new puma::Printer(); } 
Workspace(Workspace *other, Logger *otherLog) { log = otherLog; matrix.copy(&other->matrix); this->voxelLength = 1e-6; myLogger = false; printer = new puma::Printer(); } Workspace(const puma::Vec3<long>& shape, Logger *otherLog) { log = otherLog; matrix.resize(shape.x,shape.y,shape.z,0); this->voxelLength = 1e-6; myLogger = false; printer = new puma::Printer(); } Workspace(long x, long y, long z, short val, double voxelLength, bool logBool) { log = new puma::Logger(logBool); matrix.resize(x,y,z,val); log->emptyLog(); this->voxelLength = voxelLength; myLogger = false; printer = new puma::Printer(); } Workspace(long x, long y, long z, double voxelLength, bool logBool) { log = new puma::Logger(logBool); matrix.resize(x,y,z,0); log->emptyLog(); this->voxelLength = voxelLength; myLogger = false; printer = new puma::Printer(); } Workspace(double voxelLength, bool logBool) { log = new puma::Logger(logBool); matrix.resize(0,0,0,0); log->emptyLog(); this->voxelLength = voxelLength; myLogger = false; printer = new puma::Printer(); } explicit Workspace(bool logBool) { log = new puma::Logger(logBool); matrix.resize(0,0,0,0); log->emptyLog(); this->voxelLength = 1e-6; myLogger = false; printer = new puma::Printer(); } Workspace(Workspace *other, bool logBool) { log = new puma::Logger(logBool); matrix.copy(&other->matrix); log->emptyLog(); this->voxelLength = 1e-6; myLogger = false; printer = new puma::Printer(); } Workspace(const puma::Vec3<long>& shape, bool logBool) { log = new puma::Logger(logBool); matrix.resize(shape.x,shape.y,shape.z,0); log->emptyLog(); this->voxelLength = 1e-6; myLogger = false; printer = new puma::Printer(); } Workspace(const puma::Vec3<long>& shape, double voxelLength, bool logBool) { log = new puma::Logger(logBool); matrix.resize(shape.x,shape.y,shape.z,0); log->emptyLog(); this->voxelLength = voxelLength; myLogger = false; printer = new puma::Printer(); } ~Workspace() { if(myLogger) { delete log; } if(myPrinter) { delete printer; } } // --- End 
Constructors --- // // --- Start Variables --- // puma::Matrix<short> matrix; puma::Logger *log; puma::Printer *printer; bool myPrinter{true}; bool myLogger{true}; double voxelLength; // --- End Variables --- // // --- Start Functions --- // void newWorkspace(double voxelLength) { matrix.resize(0,0,0,0); log->emptyLog(); this->voxelLength = voxelLength; } void setLogLocation(std::string log_location) { log->emptyLog(log_location); } void setPrinter(puma::Printer *print) { std::cout << "Printer changed to user input" << std::endl; printer = print; myPrinter = false; } void newPrinter() { std::cout << "Printer returned to default" << std::endl; printer = new puma::Printer(); myPrinter = true; } short operator() (long i, long j, long k) { return matrix(i,j,k); } short& at(long i) { return matrix.at(i); } short& at(long i, long j, long k) { return matrix.at(i,j,k); } short& at_safe(long i) { return matrix.at_safe(i); } short& at_safe(long i, long j, long k) { return matrix.at_safe(i,j,k); } long size() { return matrix.size(); } long X() { return matrix.X(); } long Y() { return matrix.Y(); } long Z() { return matrix.Z(); } puma::Vec3<long> shape() { return puma::Vec3<long>(matrix.X(),matrix.Y(),matrix.Z()); } puma::Vec3<long> getShape() { return puma::Vec3<long>(matrix.X(),matrix.Y(),matrix.Z()); } long getLength() { return matrix.size(); } long getSize() { return matrix.size(); } long getX() { return matrix.X(); } long getY() { return matrix.Y(); } long getZ() { return matrix.Z(); } short min() { return matrix.min(); } short max() { return matrix.max(); } double average() { return matrix.average(); } bool crop(long x1, long x2, long y1, long y2, long z1, long z2) { return matrix.crop(x1,x2,y1,y2,z1,z2); } void setSize(long X, long Y, long Z) { if( !( X>0 && Y>0 && Z>0 ) ) { std::cout << "Invalid size. X, Y, and Z must be >0" << std::endl; return; } matrix.resize(X,Y,Z,0); } void resize(long X, long Y, long Z) { if( !( X>0 && Y>0 && Z>0 ) ) { std::cout << "Invalid size. 
X, Y, and Z must be >0" << std::endl; return; } matrix.resize(X,Y,Z,0); } void setMaterialID(puma::Cutoff cutoff, int identifier) { if(identifier < 0) { return; } if(identifier > 1000) { return; } int X = (int)matrix.X(); int Y = (int)matrix.Y(); int Z = (int)matrix.Z(); #pragma omp parallel for for(long i=0; i<X; i++) { for(long j=0; j<Y; j++) { for(long k=0; k<Z; k++) { short value = matrix(i,j,k); if(value <= cutoff.second && value >= cutoff.first) { matrix(i,j,k) = identifier; } } } } } void setMaterialID(Workspace *other, puma::Cutoff cutoff, int identifier) { if(identifier < 0) { return; } if(identifier > 1000) { return; } int X = (int)matrix.X(); int Y = (int)matrix.Y(); int Z = (int)matrix.Z(); #pragma omp parallel for for(long i=0; i<X; i++) { for(long j=0; j<Y; j++) { for(long k=0; k<Z; k++) { short value = other->matrix(i,j,k); if(value <= cutoff.second && value >= cutoff.first) { matrix(i,j,k) = identifier; } } } } } // --- End Functions --- // }; } #endif // Workspace
MSCHAPv2_bs_fmt_plug.c
/*
 * MSCHAPv2_fmt.c -- Microsoft PPP CHAP Extensions, Version 2
 *
 * Written by JoMo-Kun <jmk at foofus.net> in 2010
 * and placed in the public domain.
 *
 * Modified for performance, OMP and utf-8 support
 * by magnum 2010-2011, no rights reserved
 *
 * Modified for using Bitsliced DES by Deepika Dutta Mishra
 * <dipikadutta at gmail.com> in 2012, no rights reserved.
 *
 * Support for freeradius-wep-patch challenge/response format
 * added by Linus Lüssing in 2012 and is licensed under CC0/PD terms:
 *  To the extent possible under law, Linus Lüssing has waived all copyright
 *  and related or neighboring rights to this work. This work is published from: Germany.
 *
 * This algorithm is designed for performing brute-force cracking of the
 * MSCHAPv2 challenge/response sets exchanged during network-based
 * authentication attempts. The captured challenge/response set from these
 * attempts should be stored using the following format:
 *
 * USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
 * USERNAME::DOMAIN:AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
 * DOMAIN\USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
 * :::MSCHAPv2 CHALLENGE:MSCHAPv2 RESPONSE:
 *
 * For example:
 * User:::5B5D7C7D7B3F2F3E3C2C602132262628:82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF:21402324255E262A28295F2B3A337C7E
 * domain\fred:::56d64cbe7bad61349a0b752335100eaf:d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b:7f8a466cff2a6bf0c80218bbf56d76bc
 *
 * http://freeradius.org/rfc/rfc2759.txt
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_MSCHAPv2_old;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MSCHAPv2_old);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "DES_std.h"
#include "DES_bs.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "memory.h"
#include "sha.h"
#include "unicode.h"
#include "memdbg.h"

#ifndef uchar
#define uchar unsigned char
#endif

#define FORMAT_LABEL         "mschapv2-naive"
#define FORMAT_NAME          "MSCHAPv2 C/R"
#define FORMAT_TAG           "$MSCHAPv2$"
#define FORMAT_TAG_LEN       (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME       "MD4 DES " DES_BS_ALGORITHM_NAME " naive"
#define BENCHMARK_COMMENT    ""
#define BENCHMARK_LENGTH     0
#define PLAINTEXT_LENGTH     125 /* lmcons.h - PWLEN (256) ? 127 ? */
#define USERNAME_LENGTH      256 /* lmcons.h - UNLEN (256) / LM20_UNLEN (20) */
#define DOMAIN_LENGTH        15  /* lmcons.h - CNLEN / DNLEN */
#define BINARY_SIZE          24
#define BINARY_ALIGN         4
#define CHALLENGE_LENGTH     64  /* hex chars of the long (authenticator+peer) challenge pair */
#define SALT_SIZE            8
#define SALT_ALIGN           4
#define CIPHERTEXT_LENGTH    48  /* hex chars of the 24-byte response */
/* NOTE(review): unparenthesized macro expression — safe at current use
 * sites, but fragile if ever used inside a larger expression */
#define TOTAL_LENGTH         13 + USERNAME_LENGTH + CHALLENGE_LENGTH + CIPHERTEXT_LENGTH
#define MIN_KEYS_PER_CRYPT   DES_BS_DEPTH
#define MAX_KEYS_PER_CRYPT   DES_BS_DEPTH

/* Self-test vectors: both tag-prefixed ciphertexts and raw
 * username/challenge/response field tuples (exercises prepare()). */
static struct fmt_tests tests[] = {
	{"", "Cricket8", {"testuser1", "", "", "d07054459a1fdbc266a006f0220e6fac", "33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde", "3545cb1d89b507a5de104435e81b14a4"} },
	{"$MSCHAPv2$4c092fd3fd98236502e8591100046326$b912ce522524d33123a982cf330a57f8e953fa7974042b5d$6a4915d0ce61d42be533640a75391925$1111", "2222"},
	{"$MSCHAPv2$5B5D7C7D7B3F2F3E3C2C602132262628$82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF$21402324255E262A28295F2B3A337C7E$User", "clientPass"},
	{"$MSCHAPv2$d07054459a1fdbc266a006f0220e6fac$33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde$3545cb1d89b507a5de104435e81b14a4$testuser1", "Cricket8"},
	{"$MSCHAPv2$56d64cbe7bad61349a0b752335100eaf$d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b$7f8a466cff2a6bf0c80218bbf56d76bc$fred", "OMG!BBQ!11!one"}, /* domain\fred */
	{"$MSCHAPv2$b3c42db475b881d3c52ff3923d7b3bf8$f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8$6321f8649b971bd11ce8d5cb22a4a738$bOb", "asdblahblahblahblahblahblahblahblah"}, /* WorkGroup\bOb */
	{"$MSCHAPv2$d94e7c7972b2376b28c268583e162de7$eba25a3b04d2c7085d01f842e2befc91745c40db0f792356$0677ca7318fd7f65ae1b4f58c9f4f400$lameuser", ""}, /* no password */
	{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$foo4", "bar4" },
	{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$", "bar4" },
	/* Ettercap generated three test vectors */
	{"$MSCHAPv2$3D79CC8CDC0261D4$B700770725F87739ADB110B310D9A289CDBB550ADCA6CB86$solar", "solarisalwaysbusy"},
	{"$MSCHAPv2$BA75EB14EFBFBF25$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$lulu", "password"},
	{"$MSCHAPv2$95A87FA62EBCD2E3C8B09E1B448A6C72$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$E2AE0995EAAC6CEFF0D9757428B51509$lulu", "password"},
	/* Single test vector from chapcrack's sample pcap file */
	{"$MSCHAPv2$6D0E1C056CD94D5F$1C93ABCE815400686BAECA315F348469256420598A73AD49$moxie", "bPCFyF2uL1p5Lg5yrKmqmY"},
	{"", "clientPass", {"User", "", "", "5B5D7C7D7B3F2F3E3C2C602132262628", "82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF", "21402324255E262A28295F2B3A337C7E"} },
	{"", "OMG!BBQ!11!one", {"domain\\fred", "", "", "56d64cbe7bad61349a0b752335100eaf", "d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b", "7f8a466cff2a6bf0c80218bbf56d76bc"} }, /* domain\fred */
	{"", "", {"lameuser", "", "domain", "d94e7c7972b2376b28c268583e162de7", "eba25a3b04d2c7085d01f842e2befc91745c40db0f792356", "0677ca7318fd7f65ae1b4f58c9f4f400"} }, /* no password */
	{"", "asdblahblahblahblahblahblahblahblah", {"WorkGroup\\bOb", "", "", "b3c42db475b881d3c52ff3923d7b3bf8", "f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8", "6321f8649b971bd11ce8d5cb22a4a738"} }, /* WorkGroup\bOb */
	{NULL}
};

/* Candidate plaintexts and their NTLM (MD4) hashes, one slot per bitslice lane */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
static uchar (*saved_key)[21];
static uchar *challenge;        /* current 8-byte salt/challenge (IP-permuted) */
static int keys_prepared;       /* 0 => saved_key must be regenerated in crypt_all */
static void set_salt(void *salt);
static char *long_to_short(char *orig); /* used to cannonicalize the format */

/* Set up the bitslice DES context and allocate per-lane key buffers. */
static void init(struct fmt_main *self)
{
	/* LM =2 for DES encryption with no salt and no iterations */
	DES_bs_init(2, DES_bs_cpt);
#if DES_bs_mt
	self->params.min_keys_per_crypt = DES_bs_min_kpc;
	self->params.max_keys_per_crypt = DES_bs_max_kpc;
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain));
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
}

/* Release buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
	MEM_FREE(saved_plain);
}

/* Validate the long form:
 * $MSCHAPv2$<auth challenge hex*32>$<response hex*48>$<peer challenge hex*32>$<user> */
static int valid_long(char *ciphertext)
{
	char *pos, *pos2;
	if (ciphertext == NULL) return 0;
	else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
	if (strlen(ciphertext) > TOTAL_LENGTH) return 0;
	/* Validate Authenticator/Server Challenge Length */
	pos = &ciphertext[FORMAT_TAG_LEN];
	for (pos2 = pos; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0;
	if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 2)) ) return 0;
	/* Validate MSCHAPv2 Response Length */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0;
	if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) ) return 0;
	/* Validate Peer/Client Challenge Length */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0;
	if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 2)) ) return 0;
	/* Validate Username Length */
	if (strlen(++pos2) > USERNAME_LENGTH) return 0;
	return 1;
}

/* Validate the short (canonical) form:
 * $MSCHAPv2$<challenge hex*16>$<response hex*48>$$ */
static int valid_short(char *ciphertext)
{
	char *pos, *pos2;
	if (ciphertext == NULL) return 0;
	else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
	if (strlen(ciphertext) > TOTAL_LENGTH) return 0;
	/* Validate MSCHAPv2 Challenge Length */
	pos = &ciphertext[FORMAT_TAG_LEN];
	for (pos2 = pos; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0;
	if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 4)) ) return 0;
	/* Validate MSCHAPv2 Response Length */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F) return 0;
	if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) ) return 0;
	return 1;
}
/* A ciphertext is acceptable in either the short or the long form. */
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
	return valid_short(ciphertext) || valid_long(ciphertext);
}

/* Build a long-form ciphertext from the raw login fields
 * (fields 3/4/5 = authenticator challenge / response / peer challenge). */
static char *prepare_long(char *split_fields[10])
{
	char *username, *cp;

	/* DOMAIN\USERNAME -or - USERNAME -- ignore DOMAIN */
	if ((username = strstr(split_fields[0], "\\")) == NULL)
		username = split_fields[0];
	else
		username++;

	cp = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+1+strlen(split_fields[5])+1+strlen(username)+1);
	sprintf(cp, "%s%s$%s$%s$%s", FORMAT_TAG, split_fields[3], split_fields[4], split_fields[5], username);
	if (valid_long(cp)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}

/* Build a short-form ciphertext from the raw fields (3 = challenge, 4 = response). */
static char *prepare_short(char *split_fields[10])
{
	char *cp;

	cp = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+1+1+1);
	sprintf(cp, "%s%s$%s$$", FORMAT_TAG, split_fields[3], split_fields[4]);
	if (valid_short(cp)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}

/* Normalize whatever the loader hands us into the canonical short form,
 * trimming trash fields and converting long-form hashes. */
static char *prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	char *ret;

	if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) {
		// check for a short format that has any extra trash fields, and if so remove them.
		char *cp1, *cp2, *cp3;
		cp1 = split_fields[1];
		cp1 += FORMAT_TAG_LEN;
		cp2 = strchr(cp1, '$');
		ret = NULL;
		if (cp2 && cp2-cp1 == CHALLENGE_LENGTH/4) {
			++cp2;
			cp3 = strchr(cp2, '$');
			/* NOTE(review): "(strlen(cp3) > 2 || cp3[2] != '$')" looks like it
			 * was meant to be &&; left as-is (behavior unchanged here) */
			if (cp3 && cp3-cp2 == CIPHERTEXT_LENGTH && (strlen(cp3) > 2 || cp3[2] != '$')) {
				ret = str_alloc_copy(split_fields[1]);
				ret[(cp3-split_fields[1])+1] = '$';
				ret[(cp3-split_fields[1])+2] = 0;
				//printf ("Here is the cut item: %s\n", ret);
			}
		}
	}
	else if (split_fields[0] && split_fields[3] && split_fields[4] && split_fields[5] &&
	         strlen(split_fields[3]) == CHALLENGE_LENGTH/2 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH &&
	         strlen(split_fields[5]) == CHALLENGE_LENGTH/2)
		ret = prepare_long(split_fields);
	else if (split_fields[0] && split_fields[3] && split_fields[4] &&
	         strlen(split_fields[3]) == CHALLENGE_LENGTH/4 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH)
		ret = prepare_short(split_fields);
	else
		ret = NULL;

	if (ret && valid_long(ret))
		ret = long_to_short(ret);
	else if (valid_long(split_fields[1]))
		ret = long_to_short(split_fields[1]);
	return ret ? ret : split_fields[1];
}

/* Canonicalize case (hex fields lower-cased, tag and username untouched)
 * and collapse long-form hashes to the short form. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char *out;
	int i, j = 0;

	if (!out) out = mem_alloc_tiny(TOTAL_LENGTH + 1, MEM_ALIGN_WORD);
	memset(out, 0, TOTAL_LENGTH + 1);
	memcpy(out, ciphertext, strlen(ciphertext));

	/* convert hashes to lower-case - exclude $MSCHAPv2 and USERNAME */
	for (i = FORMAT_TAG_LEN; i < TOTAL_LENGTH + 1 && j < 3; i++) {
		if (out[i] >= 'A' && out[i] <= 'Z')
			out[i] |= 0x20;
		else if (out[i] == '$')
			j++;
	}

	if (valid_long(out))
		return long_to_short(out);
	return out;
}

/* Bit-expand the 24-byte response into three 64-bit DES blocks and apply
 * the DES initial permutation, matching the bitslice comparison layout. */
static uint32_t *generate_des_format(uchar* binary)
{
	static uint32_t out[6];
	ARCH_WORD block[6];
	int chr, src,dst,i;
	uchar value, mask;
	ARCH_WORD *ptr;

	memset(block, 0, sizeof(block));

	for (chr = 0; chr < 24; chr=chr + 8)
	{
		dst = 0;
		for (i=0; i<8; i++)
		{
			value = binary[chr + i];
			mask = 0x80;

			for (src = 0; src < 8; src++) {
				if (value & mask)
					block[(chr/4) + (dst>>5)]|= 1U << (dst & 0x1F);
				mask >>= 1;
				dst++;
			}
		}
	}

	/* Apply initial permutation on ciphertext blocks */
	for (i=0; i<6; i=i+2)
	{
		ptr = DES_do_IP(&block[i]);
		out[i] = ptr[1];
		out[i+1] = ptr[0];
	}

	return out;
}

/* Decode the hex response (skipping tag and challenge) into the DES-format
 * binary used by the cmp_* functions. */
static void *get_binary(char *ciphertext)
{
	uchar binary[BINARY_SIZE];
	int i;
	uint32_t *ptr;

	if (valid_short(ciphertext))
		ciphertext += FORMAT_TAG_LEN + CHALLENGE_LENGTH / 4 + 1; /* Skip - $MSCHAPv2$, MSCHAPv2 Challenge */
	else
		ciphertext += FORMAT_TAG_LEN + CHALLENGE_LENGTH / 2 + 1; /* Skip - $MSCHAPv2$, Authenticator Challenge */

	for (i=0; i<BINARY_SIZE; i++)
	{
		binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
		binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
	}

	/* Set binary in DES format */
	ptr = generate_des_format(binary);
	return ptr;
}

/* Expand 7 key bytes to 8 DES key bytes and install them for lane `index`. */
inline static void setup_des_key(unsigned char key_56[], int index)
{
	char key[8];

	/* Right shift key bytes by 1 to bring in openssl format */
	/* Each byte of key is xored with 0x80 to pass check for 0 in DES_bs_set_key() */
	key[0] = (key_56[0] >> 1) | 0x80;
	key[1] = (((key_56[0] << 7) | (key_56[1] >> 1)) >>1) |
0x80; key[2] = (((key_56[1] << 6) | (key_56[2] >> 2)) >>1) | 0x80; key[3] = (((key_56[2] << 5) | (key_56[3] >> 3)) >>1) | 0x80; key[4] = (((key_56[3] << 4) | (key_56[4] >> 4)) >>1) | 0x80; key[5] = (((key_56[4] << 3) | (key_56[5] >> 5)) >>1) | 0x80; key[6] = (((key_56[5] << 2) | (key_56[6] >> 6)) >>1) | 0x80; key[7] = ((key_56[6] << 1) >>1 ) | 0x80; DES_bs_set_key((char*)key, index); } /* Calculate the MSCHAPv2 response for the given challenge, using the specified authentication identity (username), password and client nonce. */ static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int i; if (!keys_prepared) { #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i++) { int len; /* Generate 16-byte NTLM hash */ len = E_md4hash((uchar *) saved_plain[i], saved_len[i], saved_key[i]); if (len <= 0) saved_plain[i][-len] = 0; // match truncation /* NULL-padding the 16-byte hash to 21-bytes is made in cmp_exact if needed */ setup_des_key(saved_key[i], i); } keys_prepared = 1; } /* Bitsliced des encryption */ DES_bs_crypt_plain(count); return count; } static int cmp_all(void *binary, int count) { return DES_bs_cmp_all((uint32_t *)binary, count); } static int cmp_one(void *binary, int index) { return DES_bs_cmp_one((uint32_t *)binary, 32, index); } static int cmp_exact(char *source, int index) { uint32_t *binary = get_binary(source); if (!DES_bs_cmp_one(binary, 64, index)) return 0; setup_des_key(&saved_key[index][7], 0); DES_bs_crypt_plain(1); if (!DES_bs_cmp_one(&binary[2], 64, 0)) { setup_des_key(saved_key[0], 0); DES_bs_crypt_plain(1); return 0; } /* NULL-pad 16-byte NTLM hash to 21-bytes (postponed until now) */ memset(&saved_key[index][16], 0, 5); setup_des_key(&saved_key[index][14], 0); DES_bs_crypt_plain(1); if (!DES_bs_cmp_one(&binary[4], 64, 0)) { setup_des_key(saved_key[0], 0); DES_bs_crypt_plain(1); return 0; } setup_des_key(saved_key[0], 0); DES_bs_crypt_plain(1); return 1; } /* Either the cipherext already 
contains the MSCHAPv2 Challenge (4 Bytes) or we are going to calculate it via: sha1(|Peer/Client Challenge (8 Bytes)|Authenticator/Server Challenge (8 Bytes)|Username (<=256)|) NOTE, we now ONLY call this function the the short form. The long form gets converted into the short form in either prepare or split function. The short form is cannonical form (Change made July, 2014, JimF) */ static void *get_salt(char *ciphertext) { static union { unsigned char u8[SALT_SIZE]; uint32_t u32[SALT_SIZE / 4]; } binary_salt; int i, cnt; uchar j; char *pos = NULL; unsigned char temp[SALT_SIZE]; pos = ciphertext + FORMAT_TAG_LEN; for (i = 0; i < SALT_SIZE; i++) binary_salt.u8[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])]; /* Apply IP to salt */ memset(temp, 0, SALT_SIZE); for (i = 0; i < 64; i++) { cnt = DES_IP[i ^ 0x20]; j = (uchar)((binary_salt.u8[cnt >> 3] >> (7 - (cnt & 7))) & 1); temp[i/8] |= j << (7 - (i % 8)); } memcpy(binary_salt.u8, temp, SALT_SIZE); return (void*)binary_salt.u32; } /* * This function will convert long hashes, into short ones (the short is now cannonical format) * converts * $MSCHAPv2$95a87fa62ebcd2e3c8b09e1b448a6c72$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$e2ae0995eaac6ceff0d9757428b51509$lulu * into * $MSCHAPv2$ba75eb14efbfbf25$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$$ * * This code was moved from get_salt(). 
 */
static char *long_to_short(char *ciphertext)
{
	static char Buf[TOTAL_LENGTH+1]; // larger than we need, but not a big deal
	static SHA_CTX ctx;
	unsigned char tmp[16];
	unsigned char digest[20];
	char *pos = NULL;
	int i;

	/* ChallengeHash (RFC 2759): SHA1(PeerChallenge|AuthChallenge|UserName),
	 * of which the first 8 bytes become the short-form challenge */
	SHA1_Init(&ctx);

	/* Peer Challenge */
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1; /* Skip $MSCHAPv2$, Authenticator Challenge and Response Hash */

	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];

	SHA1_Update(&ctx, tmp, 16);

	/* Authenticator Challenge */
	pos = ciphertext + FORMAT_TAG_LEN; /* Skip $MSCHAPv2$ */

	memset(tmp, 0, 16);
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];

	SHA1_Update(&ctx, tmp, 16);

	/* Username - Only the user name (as presented by the peer and
	   excluding any prepended domain name) is used as input to SHAUpdate()
	*/
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1 + 16*2 + 1; /* Skip $MSCHAPv2$, Authenticator, Response and Peer */
	SHA1_Update(&ctx, pos, strlen(pos));
	SHA1_Final(digest, &ctx);

	// Ok, now we re-make our ciphertext buffer, into the short canonical form.
	strcpy(Buf, FORMAT_TAG);
	pos = Buf + FORMAT_TAG_LEN;
	for (i = 0; i < SALT_SIZE; i++) {
		//binary_salt.u8[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
		pos[(i<<1)] = itoa16[digest[i]>>4];
		pos[(i<<1)+1] = itoa16[digest[i]&0xF];
	}
	/* copy "$<response>" straight from the long form, then terminate "$$" */
	memcpy(&pos[16], &ciphertext[42], CIPHERTEXT_LENGTH+2);
	pos[16+CIPHERTEXT_LENGTH+2] = '$';
	pos[16+CIPHERTEXT_LENGTH+3] = 0;
	//printf ("short=%s  original=%s\n", Buf, ciphertext);
	return Buf;
}

/* Install the pre-permuted 8-byte challenge as the bitslice plaintext. */
static void set_salt(void *salt)
{
	challenge = salt;
	DES_bs_generate_plaintext(challenge);
}

/* Store a candidate password and invalidate the prepared keys. */
static void mschapv2_set_key(char *key, int index)
{
	saved_len[index] = strlen(key);
	memcpy(saved_plain[index], key, saved_len[index] + 1);
	keys_prepared = 0;
}

static char *get_key(int index)
{
	return saved_plain[index];
}

/* Hash the first word of the salt for the salt table. */
static int salt_hash(void *salt)
{
	return *(uint32_t *)salt & (SALT_HASH_SIZE - 1);
}

/* Format registration: parameters block, then the method table. */
struct fmt_main fmt_MSCHAPv2_old = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if DES_BS
		FMT_BS |
#if DES_bs_mt
		FMT_OMP | FMT_OMP_BAD |
#endif
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		mschapv2_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			DES_bs_get_hash_0,
			DES_bs_get_hash_1,
			DES_bs_get_hash_2,
			DES_bs_get_hash_3,
			DES_bs_get_hash_4,
			DES_bs_get_hash_5,
			DES_bs_get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
shuffle.h
//===- shuffle.h - OpenMP variants of the shuffle idiom for all targets -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // Shuffle function implementations for all supported targets. // // Note: We unify the mask type to uint64_t instead of __kmpc_impl_lanemask_t. // //===----------------------------------------------------------------------===// #ifndef LIBOMPTARGET_DEVICERTL_SHUFFLE_H #define LIBOMPTARGET_DEVICERTL_SHUFFLE_H #include <stdint.h> #pragma omp declare target /// External shuffle API /// ///{ extern "C" { int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size); int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size); } ///} /// Forward declarations /// ///{ extern "C" { unsigned GetLaneId(); unsigned __kmpc_get_warp_size(); void __kmpc_impl_unpack(uint64_t val, uint32_t &lo, uint32_t &hi); uint64_t __kmpc_impl_pack(uint32_t lo, uint32_t hi); } ///} /// Fallback implementations of the shuffle sync idiom. /// Unavailable at present (would error at link time if used). /// ///{ int32_t __kmpc_impl_shfl_sync(uint64_t Mask, int32_t Var, int32_t SrcLane); int32_t __kmpc_impl_shfl_down_sync(uint64_t Mask, int32_t Var, uint32_t Delta, int32_t Width); ///} /// AMDGCN implementations of the shuffle sync idiom. 
/// ///{ #pragma omp begin declare variant match(device = {arch(amdgcn)}) inline int32_t __kmpc_impl_shfl_sync(uint64_t Mask, int32_t Var, int32_t SrcLane) { int Width = __kmpc_get_warp_size(); int Self = GetLaneId(); int Index = SrcLane + (Self & ~(Width - 1)); return __builtin_amdgcn_ds_bpermute(Index << 2, Var); } inline int32_t __kmpc_impl_shfl_down_sync(uint64_t Mask, int32_t Var, uint32_t LaneDelta, int32_t Width) { int Self = GetLaneId(); int Index = Self + LaneDelta; Index = (int)(LaneDelta + (Self & (Width - 1))) >= Width ? Self : Index; return __builtin_amdgcn_ds_bpermute(Index << 2, Var); } #pragma omp end declare variant ///} /// NVPTX implementations of the shuffle and shuffle sync idiom. /// ///{ #pragma omp begin declare variant match( \ device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)}) inline int32_t __kmpc_impl_shfl_sync(uint64_t Mask, int32_t Var, int32_t SrcLane) { return __nvvm_shfl_sync_idx_i32(Mask, Var, SrcLane, 0x1f); } inline int32_t __kmpc_impl_shfl_down_sync(uint64_t Mask, int32_t Var, uint32_t Delta, int32_t Width) { int32_t T = ((__kmpc_get_warp_size() - Width) << 8) | 0x1f; return __nvvm_shfl_sync_down_i32(Mask, Var, Delta, T); } #pragma omp end declare variant ///} #pragma omp end declare target #endif
LAGraph_cc_fastsv5.c
//------------------------------------------------------------------------------
// LAGraph_cc_fastsv5: connected components
//------------------------------------------------------------------------------

/*
    LAGraph: graph algorithms based on GraphBLAS

    Copyright 2020 LAGraph Contributors.

    (see Contributors.txt for a full list of Contributors; see
    ContributionInstructions.txt for information on how you can Contribute to
    this project).

    All Rights Reserved.

    NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
    CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
    AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
    PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
    THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
    RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.

    Released under a BSD license, please see the LICENSE file distributed with
    this Software or contact permission@sei.cmu.edu for full terms.

    Created, in part, with funding and support from the United States
    Government. (see Acknowledgments.txt file).

    This program includes and/or can make use of certain third party source
    code, object code, documentation and other files ("Third Party Software").
    See LICENSE file for more details.
*/

/**
 * Code is based on the algorithm described in the following paper
 * Zhang, Azad, Hu. FastSV: A Distributed-Memory Connected Component
 * Algorithm with Fast Convergence (SIAM PP20)
 *
 * Modified by Tim Davis, Texas A&M University
 **/

// The input matrix A must be symmetric. Self-edges (diagonal entries) are
// OK, and are ignored. The values and type of A are ignored; just its
// pattern is accessed.

// The matrix A must have dimension 2^32 or less. If it is larger, use the
// 64-bit version of this method instead. TODO combine the two versions into a
// single user-callable code.
#include "LAGraph.h"

//------------------------------------------------------------------------------
// atomic_min_uint32: compute (*p) = min (*p, value), via atomic update
//------------------------------------------------------------------------------

// Lock-free minimum: retry a compare-and-swap until (*p) <= value holds.
// If another thread lowers (*p) between the read and the CAS, the CAS fails
// and the loop re-reads (*p) and tries again.  (The only use of this helper
// in this file sits inside an "#if 0" block in Reduce_assign32.)

static inline void atomic_min_uint32
(
    uint32_t *p,            // input/output
    uint32_t value          // input
)
{
    // note: the identifier 'new' keeps this file C-only (C++ keyword)
    uint32_t old, new ;
    do
    {
        // get the old value at (*p)
        // #pragma omp atomic read
        old = (*p) ;
        // compute the new minimum
        new = LAGRAPH_MIN (old, value) ;
    }
    while (!__sync_bool_compare_and_swap (p, old, new)) ;
}

//------------------------------------------------------------------------------
// Reduce_assign32: w (index) += src, using MIN as the "+=" accum operator
//------------------------------------------------------------------------------

// mask = NULL, accumulator = GrB_MIN_UINT32, descriptor = NULL.

// Duplicates are summed with the accumulator, which differs from how
// GrB_assign works.  GrB_assign states that the presence of duplicates results
// in undefined behavior.  SuiteSparse:GraphBLAS follows the MATLAB rule, which
// discards all but the first of the duplicates.  TODO: add this to GraphBLAS
// as a variant of GrB_assign, either as GxB_assign_accum (or another name),
// or as a GxB_* descriptor setting.
#define LAGRAPH_FREE_ALL

// Reduce_assign32: for each k, w[index[k]] = min (w[index[k]], s[k]).
// Both w and s must have all n entries present.  The vectors are exported to
// raw arrays, updated sequentially, and imported back (the parallel/atomic
// path is compiled out with #if 0).
static GrB_Info Reduce_assign32
(
    GrB_Vector *w_handle,       // vector of size n, all entries present
    GrB_Vector *s_handle,       // vector of size n, all entries present
    uint32_t *index,            // array of size n
    GrB_Index n,
    int nthreads
)
{
    GrB_Type w_type, s_type ;
    GrB_Index w_n, s_n, w_nvals, s_nvals, *w_i, *s_i ;
    uint32_t *w_x, *s_x ;
    // export w and s to get direct access to their value arrays
    LAGr_Vector_export (w_handle, &w_type, &w_n, &w_nvals, &w_i,
        (void **) &w_x, NULL) ;
    LAGr_Vector_export (s_handle, &s_type, &s_n, &s_nvals, &s_i,
        (void **) &s_x, NULL) ;
    #if 0
    if (nthreads >= 4)
    {
        // parallel version, with atomics (currently disabled)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (GrB_Index k = 0 ; k < n ; k++)
        {
            uint32_t i = index [k] ;
            atomic_min_uint32 (&(w_x [i]), s_x [k]) ;
        }
    }
    else
    #endif
    {
        // sequential version, to avoid atomics
        for (GrB_Index k = 0 ; k < n ; k++)
        {
            uint32_t i = index [k] ;
            w_x [i] = LAGRAPH_MIN (w_x [i], s_x [k]) ;
        }
    }
    // give the arrays back to the vectors
    LAGr_Vector_import (w_handle, w_type, w_n, w_nvals, &w_i,
        (void **) &w_x, NULL) ;
    LAGr_Vector_import (s_handle, s_type, s_n, s_nvals, &s_i,
        (void **) &s_x, NULL) ;
    return (GrB_SUCCESS) ;
}

#undef  LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL    \
{                           \
    LAGRAPH_FREE (I) ;      \
    LAGRAPH_FREE (V32) ;    \
    LAGr_free (&f) ;        \
    LAGr_free (&gp) ;       \
    LAGr_free (&mngp) ;     \
    LAGr_free (&gp_new) ;   \
    LAGr_free (&mod) ;      \
    if (sanitize) LAGr_free (&S) ;  \
}

//------------------------------------------------------------------------------
// LAGraph_cc_fastsv5
//------------------------------------------------------------------------------

// Compute the connected components of the symmetric matrix (graph) A using
// the FastSV hooking-and-shortcutting iteration.  On success, *result is a
// vector of size n where result[i] is the component id (a representative
// vertex) of vertex i.  Ownership of *result passes to the caller.
GrB_Info LAGraph_cc_fastsv5
(
    GrB_Vector *result,     // output: array of component identifiers
    GrB_Matrix A,           // input matrix
    bool sanitize           // if true, ensure A is symmetric
)
{
    GrB_Info info ;
    uint32_t *V32 = NULL ;
    GrB_Index n, *I = NULL ;
    GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL ;
    GrB_Matrix S = NULL ;

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LAGr_Matrix_nrows (&n, A) ;
    // this 32-bit version requires vertex ids to fit in uint32_t
    if (n > UINT32_MAX)
    {
        LAGRAPH_ERROR ("problem too large; use 64-bit version instead",
            GrB_INVALID_VALUE) ;
    }

    if (sanitize)
    {
        // S = A | A'
        LAGr_Matrix_new (&S, GrB_BOOL, n, n) ;
        LAGr_eWiseAdd (S, NULL, NULL, GrB_LOR, A, A, LAGraph_desc_otoo) ;
    }
    else
    {
        // Use the input as-is, and assume it is symmetric
        S = A ;
    }

    //--------------------------------------------------------------------------
    // initializations
    //--------------------------------------------------------------------------

    // determine # of threads to use for Reduce_assign
    int nthreads_max = LAGraph_get_nthreads ( ) ;
    int nthreads = n / (1024*1024) ;
    nthreads = LAGRAPH_MIN (nthreads, nthreads_max) ;
    nthreads = LAGRAPH_MAX (nthreads, 1) ;

    // # of threads to use for typecast
    int nthreads2 = n / (64*1024) ;
    nthreads2 = LAGRAPH_MIN (nthreads2, nthreads_max) ;
    nthreads2 = LAGRAPH_MAX (nthreads2, 1) ;

    // vectors: f is the parent vector, gp the grandparent, mngp the min
    // neighbor grandparent, mod the convergence test
    LAGr_Vector_new (&f, GrB_UINT32, n) ;
    LAGr_Vector_new (&gp_new, GrB_UINT32, n) ;
    LAGr_Vector_new (&mod, GrB_BOOL, n) ;

    // temporary arrays
    // NOTE(review): these allocations are not checked for NULL before the
    // loop below writes into them -- TODO confirm LAGraph_malloc policy.
    I = LAGraph_malloc (n, sizeof (GrB_Index)) ;
    V32 = LAGraph_malloc (n, sizeof (uint32_t)) ;

    // prepare vectors: each vertex starts as its own parent, f = 0:n-1
    #pragma omp parallel for num_threads(nthreads2) schedule(static)
    for (GrB_Index i = 0 ; i < n ; i++)
    {
        I [i] = i ;
        V32 [i] = (uint32_t) i ;
    }
    LAGr_Vector_build (f, I, V32, n, GrB_PLUS_UINT32) ;
    LAGr_Vector_dup (&gp, f) ;
    LAGr_Vector_dup (&mngp, f) ;

    //--------------------------------------------------------------------------
    // main computation
    //--------------------------------------------------------------------------

    bool diff = true ;
    while (diff)
    {
        // hooking & shortcutting: mngp = min (mngp, S*gp)
        LAGr_mxv (mngp, NULL, GrB_MIN_UINT32, GxB_MIN_SECOND_UINT32, S, gp,
            NULL) ;
        // f[V32[k]] = min (f[V32[k]], mngp[k]), then f = min (f, mngp, gp)
        LAGRAPH_OK (Reduce_assign32 (&f, &mngp, V32, n, nthreads)) ;
        LAGr_eWiseAdd (f, NULL, GrB_MIN_UINT32, GrB_MIN_UINT32, mngp, gp,
            NULL);

        // calculate grandparent: gp_new = f (f)
        LAGr_Vector_extractTuples (NULL, V32, &n, f) ;
        #pragma omp parallel for num_threads(nthreads2) schedule(static)
        for (uint32_t i = 0 ; i < n ; i++)
        {
            I [i] = (GrB_Index) V32 [i] ;
        }
        LAGr_extract (gp_new, NULL, NULL, f, I, n, NULL) ;

        // check termination: any (gp_new != gp) ?
        LAGr_eWiseMult (mod, NULL, NULL, GrB_NE_UINT32, gp_new, gp, NULL) ;
        LAGr_reduce (&diff, NULL, GxB_LOR_BOOL_MONOID, mod, NULL) ;

        // swap gp and gp_new
        GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    *result = f ;
    f = NULL ;          // ownership of f transfers to the caller
    LAGRAPH_FREE_ALL ;
    return (GrB_SUCCESS) ;
}
openmp_demo.c
#include <stdio.h>
#include <omp.h>

/* Minimal OpenMP demo: every thread in the parallel region announces
 * itself twice, showing that 'val' is shared across the team. */
int main(int argc, char const *argv[])
{
    int val = 10;

#pragma omp parallel
    {
        /* query the thread id once; it is constant within the region */
        int tid = omp_get_thread_num();
        printf("Starting Thread %d --- %d\n", tid, val);
        printf("Finishing Thread %d --- %d\n", tid, val);
    }
    return 0;
}
rii.h
#ifndef RII_H
#define RII_H

#include <iostream>
#include <cassert>
#include "pqkmeans.h"
#include "./distance.h"

// For py::array_t
// See http://pybind11.readthedocs.io/en/master/advanced/pycpp/numpy.html#direct-access
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>

// NOTE(review): std::partial_sort / std::binary_search / std::round /
// std::shuffle / std::iota are used below without including <algorithm>,
// <cmath>, <random>, <numeric>; this relies on transitive includes -- confirm.

namespace py = pybind11;

namespace rii {

struct DistanceTable{
    // Helper structure. This is identical to vec<vec<float>> dt(M, vec<float>(Ks))
    DistanceTable() {}
    DistanceTable(size_t M, size_t Ks) : Ks_(Ks), data_(M * Ks) {}
    void SetVal(size_t m, size_t ks, float val) {
        data_[m * Ks_ + ks] = val;
    }
    float GetVal(size_t m, size_t ks) const {
        return data_[m * Ks_ + ks];
    }
    size_t Ks_;                  // number of codewords per subspace
    std::vector<float> data_;    // flattened M x Ks table of distances
};

// PQ-code index with an optional inverted-file (IVF) layer of coarse centers.
class RiiCpp {
public:
    RiiCpp() {}  // Shouldn't be default-constructed
    RiiCpp(const py::array_t<float> &codewords, bool verbose);

    // ===== Functions that can be called from Python =====
    //void SetCodewords(const py::array_t<float> &codewords);  // This should be called first
    void Reconfigure(int nlist, int iter);
    void AddCodes(const py::array_t<unsigned char> &codes, bool update_flag);
    // The default integers of Python is int64 (long long), so the type of target_ids is long long
    std::pair<std::vector<size_t>, std::vector<float>> QueryLinear(const py::array_t<float> &query, int topk,
                                                                   const py::array_t<long long> &target_ids) const;
    std::pair<std::vector<size_t>, std::vector<float>> QueryIvf(const py::array_t<float> &query, int topk,
                                                                const py::array_t<long long> &target_ids, int L) const;
    void Clear();

    // ===== Functions that would not be called from Python (Used inside c++) =====
    void UpdatePostingLists(size_t start, size_t num);
    DistanceTable DTable(const py::array_t<float> &vec) const;
    float ADist(const DistanceTable &dtable, const std::vector<unsigned char> &code) const;
    float ADist(const DistanceTable &dtable, const std::vector<unsigned char> &flattened_codes, size_t n) const;
    std::pair<std::vector<size_t>, std::vector<float>> PairVectorToVectorPair(const std::vector<std::pair<size_t, float>> &pair_vec) const;

    // Property getter
    size_t GetN() const {return flattened_codes_.size() / M_;}
    size_t GetNumList() const {return coarse_centers_.size();}

    // Given a long (N * M) codes, pick up n-th code
    std::vector<unsigned char> NthCode(const std::vector<unsigned char> &long_code, size_t n) const;
    // Given a long (N * M) codes, pick up m-th element from n-th code
    unsigned char NthCodeMthElement(const std::vector<unsigned char> &long_code, std::size_t n, size_t m) const;

    // Member variables
    size_t M_, Ks_;              // number of subspaces, codewords per subspace
    bool verbose_;
    std::vector<std::vector<std::vector<float>>> codewords_;  // (M, Ks, Ds)
    std::vector<std::vector<unsigned char>> coarse_centers_;  // (NumList, M)
    std::vector<unsigned char> flattened_codes_;  // (N, M) PQ codes are flattened to N * M long array
    std::vector<std::vector<int>> posting_lists_;  // (NumList, any)
};

// Copy the (M, Ks, Ds) codeword array from Python into codewords_.
RiiCpp::RiiCpp(const py::array_t<float> &codewords, bool verbose)
{
    verbose_ = verbose;
    const auto &r = codewords.unchecked<3>();  // codewords must have ndim=3, with non-writable
    M_ = (size_t) r.shape(0);
    Ks_ = (size_t) r.shape(1);
    size_t Ds = (size_t) r.shape(2);
    codewords_.resize(M_, std::vector<std::vector<float>>(Ks_, std::vector<float>(Ds)));
    for (ssize_t m = 0; m < r.shape(0); ++m) {
        for (ssize_t ks = 0; ks < r.shape(1); ++ks) {
            for (ssize_t ds = 0; ds < r.shape(2); ++ds) {
                codewords_[m][ks][ds] = r(m, ks, ds);
            }
        }
    }
    if (verbose_) {
        // Check which SIMD functions are used. See distance.h for this global variable.
        std::cout << "SIMD support: " << g_simd_architecture << std::endl;
    }
}

// Cluster a sample of the stored codes into nlist coarse centers (PQk-means)
// and rebuild all posting lists from scratch.
void RiiCpp::Reconfigure(int nlist, int iter)
{
    assert(0 < nlist);
    assert((size_t) nlist <= GetN());

    // ===== (1) Sampling vectors for pqk-means =====
    // Since clustering takes time, we use a subset of all codes for clustering.
    size_t len_for_clustering = std::min(GetN(), (size_t) nlist * 100);
    if (verbose_) {
        std::cout << "The number of vectors used for training of coarse centers: "
                  << len_for_clustering << std::endl;
    }
    // Prepare a random set of integers, drawn from [0, ..., N-1], where the cardinality of the set is len_for_clustering
    std::vector<size_t> ids_for_clustering(GetN());  // This can be large and might be the bootle neck of memory consumption
    std::iota(ids_for_clustering.begin(), ids_for_clustering.end(), 0);  // 0, 1, 2, ...
    // fixed seed (123) so the sample -- and thus the centers -- is reproducible
    std::shuffle(ids_for_clustering.begin(), ids_for_clustering.end(), std::default_random_engine(123));
    ids_for_clustering.resize(len_for_clustering);
    ids_for_clustering.shrink_to_fit();  // For efficient memory usage

    std::vector<unsigned char> flattened_codes_randomly_picked;  // size=len_for_clustering
    flattened_codes_randomly_picked.reserve(len_for_clustering * M_);
    for (const auto &id : ids_for_clustering) {  // Pick up vectors to construct a training set
        std::vector<unsigned char> code = NthCode(flattened_codes_, id);
        flattened_codes_randomly_picked.insert(flattened_codes_randomly_picked.end(),
                                               code.begin(), code.end());
    }
    assert(flattened_codes_randomly_picked.size() == len_for_clustering * M_);

    // ===== (2) Run pqk-means =====
    if (verbose_) {std::cout << "Start to run PQk-means" << std::endl;}
    pqkmeans::PQKMeans clustering_instance(codewords_, nlist, iter, verbose_);
    clustering_instance.fit(flattened_codes_randomly_picked);

    // ===== (3) Update coarse centers =====
    coarse_centers_ = clustering_instance.GetClusterCenters();
    assert(coarse_centers_.size() == (size_t) nlist);
    assert(coarse_centers_[0].size() == M_);

    // ===== (4) Update posting lists =====
    if (verbose_) {std::cout << "Start to update posting lists" << std::endl;}
    posting_lists_.clear();
    posting_lists_.resize(nlist);
    for (auto &posting_list : posting_lists_) {
        posting_list.reserve(GetN() / nlist);  // Roughly malloc
    }
    UpdatePostingLists(0, GetN());
}

// Append N codes to the flat code array; optionally assign them to posting
// lists immediately (requires Reconfigure to have been run before).
void RiiCpp::AddCodes(const py::array_t<unsigned char> &codes, bool update_flag)
{
    // (1) Add new input codes to flatted_codes. This imply pushes back the elements.
    // After that, if update_flg=true, (2) update posting lists for the input codes.
    // Note that update_flag should be true in usual cases. It should be false
    // if (1) this is the first call of AddCodes (i.e., calling in add_configure()),
    // of (2) you've decided to call reconfigure() manually after add()
    if (update_flag && coarse_centers_.empty()) {
        std::cerr << "Error. reconfigure() must be called before running add(vecs=X, update_posting_lists=True)."
                  << "If this is the first addition, please call add_configure(vecs=X)" << std::endl;
        throw;
    }

    // ===== (1) Add codes to flattened_codes =====
    const auto &r = codes.unchecked<2>();  // codes must have ndim=2; with non-writeable
    size_t N = (size_t) r.shape(0);
    assert(M_ == (size_t) r.shape(1));
    size_t N0 = GetN();
    flattened_codes_.resize( (N0 + N) * M_);
    for (size_t n = 0; n < N; ++n) {
        for (size_t m = 0; m < M_; ++m) {
            flattened_codes_[ (N0 + n) * M_ + m] = r(n, m);
        }
    }
    if (verbose_) {
        std::cout << N << " new vectors are added." << std::endl;
        std::cout << "Total number of codes is " << GetN() << std::endl;
    }

    // ===== (2) Update posting lists =====
    if (update_flag) {
        if (verbose_) { std::cout << "Start to update posting lists" << std::endl; }
        UpdatePostingLists(N0, N);
    }
}

// Exhaustive (non-IVF) asymmetric-distance search over all codes, or over
// the given target_ids subset; returns the topk (ids, distances).
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::QueryLinear(const py::array_t<float> &query,
                                                                        int topk,
                                                                        const py::array_t<long long> &target_ids) const
{
    const auto &tids = target_ids.unchecked<1>();  // target_ids must have ndim = 1; can be non-writeable
    size_t S = tids.shape(0);  // The number of target_ids. It might be 0 if not specified.
    assert((size_t) topk <= GetN());

    // ===== (1) Create dtable =====
    DistanceTable dtable = DTable(query);

    // ===== (2) Run PQ linear search =====
    // [todo] Can be SIMDized?
    std::vector<std::pair<size_t, float>> scores;
    if (S == 0) {  // No target ids
        size_t N = GetN();
        scores.resize(N);
#pragma omp parallel for
        for (size_t n = 0; n < N; ++n) {
            scores[n] = {n, ADist(dtable, flattened_codes_, n)};
        }
    } else {  // Target ids are specified
        assert((size_t) topk <= S);
        assert(S <= GetN());
        scores.resize(S);
#pragma omp parallel for
        for (size_t s = 0; s < S; ++s) {
            size_t tid = static_cast<size_t>(tids(s));
            scores[s] = {tid, ADist(dtable, flattened_codes_, tid)};
        }
    }

    // ===== (3) Sort them =====
    // [todo] Can be parallelized?
    std::partial_sort(scores.begin(), scores.begin() + topk, scores.end(),
                      [](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});
    scores.resize(topk);
    scores.shrink_to_fit();

    // ===== (4) Return the result, in the form of pair<vec, vec> =====
    // Note that this returns two lists, not np.array
    return PairVectorToVectorPair(scores);
}

// IVF search: rank coarse centers, then scan the best w posting lists until
// L candidates are found, and return the topk of those candidates.
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::QueryIvf(const py::array_t<float> &query,
                                                                     int topk,
                                                                     const py::array_t<long long> &target_ids,
                                                                     int L) const
{
    const auto &tids = target_ids.unchecked<1>();  // target_ids must have ndim = 1 with non-writeable
    size_t S = tids.shape(0);  // The number of target_ids. It might be 0 if not specified.
    assert((size_t) topk <= GetN());
    assert(topk <= L && (size_t) L <= GetN());

    // ===== (1) Create dtable =====
    DistanceTable dtable = DTable(query);

    // ===== (2) Compare to coarse centers and sort the results =====
    std::vector<std::pair<size_t, float>> scores_coarse(coarse_centers_.size());
    size_t nlist = GetNumList();
    //#pragma omp parallel for
    for (size_t no = 0; no < nlist; ++no) {
        scores_coarse[no] = {no, ADist(dtable, coarse_centers_[no])};
    }

    // ===== (3) Partial sort the coarse results. =====
    size_t w;  // The number of posting lists to be considered
    if (S == 0) {
        w = (size_t) std::round((double) L * GetNumList() / GetN());
    } else {
        assert((size_t) topk <= S && S <= GetN());
        w = (size_t) std::round((double) L * GetNumList() / S);
    }
    w += 3;  // Top poslists might contain a few items, so we set w litter bit bigger for insurance
    if (nlist < w) {  // If w is bigger than the original nlist, let's set back nlist
        w = nlist;
    }
    std::partial_sort(scores_coarse.begin(), scores_coarse.begin() + w, scores_coarse.end(),
                      [](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});

    // ===== (4) Traverse posting list =====
    std::vector<std::pair<size_t, float>> scores;
    scores.reserve(L);
    int coarse_cnt = 0;
    for (const auto &score_coarse : scores_coarse) {
        size_t no = score_coarse.first;
        coarse_cnt++;

        // [todo] This loop can be parallelized
        for (const auto &n : posting_lists_[no]) {
            // ===== (5) If id is not included in target_ids, skip. =====
            // Note that if S==0 (target is all), then evaluate all IDs
            // NOTE(review): binary_search requires target_ids to be sorted
            // ascending -- presumably guaranteed by the Python caller; confirm.
            if (S != 0 && !std::binary_search(target_ids.data(),
                                              target_ids.data() + S,
                                              static_cast<long long>(n))) {
                continue;
            }

            // ===== (6) Evaluate n =====
            scores.emplace_back(n, ADist(dtable, flattened_codes_, n));

            // ===== (7) If scores are collected enough =====
            if (scores.size() == (size_t) L) {
                // jumps into the if-body below; legal in C++, but fragile
                goto finish;
            }
        }
        // If w coarse centers are traversed and still L items are not found,
        // we terminate the process and do the final reranking
        if ( (size_t) coarse_cnt == w) {
            finish:
            // ===== (8) Sort them =====
            std::partial_sort(scores.begin(), scores.begin() + topk, scores.end(),
                              [](const std::pair<size_t, float> &a, const std::pair<size_t, float> &b){return a.second < b.second;});
            scores.resize(topk);
            scores.shrink_to_fit();

            // ===== (9) Return the result, in the form of pair<vec, vec> =====
            // Note that this returns two lists, not np.array
            return PairVectorToVectorPair(scores);
        }
    }
    // It can be happened that vectors are not found
    return std::pair<std::vector<size_t>, std::vector<float>>({}, {});
}

// Drop all stored codes, coarse centers, and posting lists (codewords_ kept).
void RiiCpp::Clear()
{
    coarse_centers_.clear();
    flattened_codes_.clear();
    posting_lists_.clear();
}

// Assign codes [start, start+num) to their nearest coarse center and append
// their ids to the corresponding posting lists.
void RiiCpp::UpdatePostingLists(size_t start, size_t num)
{
    // Update (add) identifiers to posting lists, from codes[start] to codes[start + num -1]
    // This just add IDs, so be careful to call this (e.g., the same IDs will be added if you call
    // this funcs twice at the same time, that would be not expected behavior)
    assert(start <= GetN());
    assert(start + num <= GetN());

    // ===== (1) Construct a dummy pqkmeans class for computing Symmetric Distance =====
    pqkmeans::PQKMeans clustering_instance(codewords_, GetNumList(), 0, true);
    clustering_instance.SetClusterCenters(coarse_centers_);

    // ===== (2) Update posting lists =====
    std::vector<int> assign(num);
#pragma omp parallel for
    for (size_t n = 0; n < num; ++n) {
        assign[n] = clustering_instance.predict_one(NthCode(flattened_codes_, start + n));
    }
    // the pushes are done serially: posting_lists_ is not thread-safe
    for (size_t n = 0; n < num; ++n) {
        posting_lists_[assign[n]].push_back(start + n);
    }
}

// Build the M x Ks table of squared L2 distances between each query subvector
// and every codeword; asymmetric distances are then table lookups.
DistanceTable RiiCpp::DTable(const py::array_t<float> &vec) const
{
    const auto &v = vec.unchecked<1>();
    size_t Ds = codewords_[0][0].size();
    assert((size_t) v.shape(0) == M_ * Ds);
    DistanceTable dtable(M_, Ks_);
    for (size_t m = 0; m < M_; ++m) {
        for (size_t ks = 0; ks < Ks_; ++ks) {
            dtable.SetVal(m, ks, fvec_L2sqr(&(v(m * Ds)), codewords_[m][ks].data(), Ds));
        }
    }
    return dtable;
}

// Asymmetric distance of one M-byte PQ code via table lookups.
float RiiCpp::ADist(const DistanceTable &dtable, const std::vector<unsigned char> &code) const
{
    assert(code.size() == M_);
    float dist = 0;
    for (size_t m = 0; m < M_; ++m) {
        unsigned char ks = code[m];
        dist += dtable.GetVal(m, ks);
    }
    return dist;
}

// Same as above, but reads the n-th code out of the flattened code array
// without materializing it.
float RiiCpp::ADist(const DistanceTable &dtable, const std::vector<unsigned char> &flattened_codes, size_t n) const
{
    float dist = 0;
    for (size_t m = 0; m < M_; ++m) {
        unsigned char ks = NthCodeMthElement(flattened_codes, n, m);
        dist += dtable.GetVal(m, ks);
    }
    return dist;
}

// Split a vector of (id, dist) pairs into parallel (ids, dists) vectors.
std::pair<std::vector<size_t>, std::vector<float> > RiiCpp::PairVectorToVectorPair(const std::vector<std::pair<size_t, float> > &pair_vec) const
{
    std::pair<std::vector<size_t>, std::vector<float>> vec_pair(std::vector<size_t>(pair_vec.size()), std::vector<float>(pair_vec.size()));
    for(size_t n = 0, N = pair_vec.size(); n < N; ++n) {
        vec_pair.first[n] = pair_vec[n].first;
        vec_pair.second[n] = pair_vec[n].second;
    }
    return vec_pair;
}

// Copy out the n-th M-byte code from the flattened array.
std::vector<unsigned char> RiiCpp::NthCode(const std::vector<unsigned char> &long_code, size_t n) const
{
    return std::vector<unsigned char>(long_code.begin() + n * M_, long_code.begin() + (n + 1) * M_);
}

// Read the m-th byte of the n-th code without copying.
unsigned char RiiCpp::NthCodeMthElement(const std::vector<unsigned char> &long_code, std::size_t n, size_t m) const
{
    return long_code[ n * M_ + m];
}

} // namespace rii

#endif // RII_H
juegodelavidaOMP.c
// Peña Lorenzo
//
// Conway's Game of Life on a toroidal grid, computing the neighbour sums of
// 16 cells at a time with SSE2 integer vectors.  The grid is padded with 16
// columns on the left, one sentinel column on the right (bordeFin-1) and one
// sentinel row above and below, which hold the wrap-around "frame".
#include <stdio.h>
#include <stdlib.h>
#include <string.h>     // BUGFIX: memset() was called with no prototype (implicit declaration)
#include <emmintrin.h>
#include <omp.h>

int main()
{
    char *grilla, *grilla2, *grilla3, *buffer, *resultado;
    int cols, rows, juegos, l, n, r, s, t, q, w, z, i, res, bordeFin, sumaAnte, sumaPost;
    __m128i v1, v2, v3, v4, v5, v6, v7, v8, v9;

    // Read the pattern header: "cols C rows R steps S"
    FILE *fp;
    fp = fopen("patron1.cells", "r");
    if (fp == NULL) {
        // BUGFIX: the original used fp without checking fopen's result
        fprintf(stderr, "cannot open patron1.cells\n");
        return 1;
    }
    fscanf(fp, "cols %i\n rows %i\n steps %i", &cols, &rows, &juegos);
    cols = cols + 16;       // 16 padding columns on the left
    rows = rows + 2;        // sentinel row above and below
    res = cols % 16;        // round cols up to a multiple of 16 for SSE loads
    bordeFin = cols;
    if (res == 0) {
        cols = cols + 16;
    } else {
        cols = cols + (16 - res);
    }
    if (posix_memalign((void **)&grilla, 16, cols * rows) != 0)
        return 1;
    if (posix_memalign((void **)&grilla2, 16, cols * rows) != 0)
        return 1;
    if (posix_memalign((void **)&grilla3, 16, cols * rows) != 0)
        return 1;
    buffer = malloc((cols + 1) * sizeof(char));
    // (the original also malloc'd into 'resultado' and immediately
    // overwrote the pointer with fgets's return -- dead allocation removed)

    // Load the pattern: 'O' -> 1 (alive), anything else -> 0 (dead).
    // BUGFIX: the buffer must be cleared BEFORE reading into it (as the
    // in-loop refill below does); the original memset came after fgets and
    // erased the first pattern row.
    memset(buffer, '\0', cols);
    resultado = fgets(buffer, cols, fp);
    // NOTE(review): nothing bounds w against rows here; a file with more
    // rows than the header declares would overrun grilla -- TODO confirm.
    w = 0;
    while (resultado != NULL) {
        // BUGFIX: loop bound was q<cols+1, which wrote one element past the
        // row (grilla[w*cols+cols] is column 0 of the NEXT row, and one past
        // the allocation on the last row).
        #pragma omp for schedule(auto)
        for (q = 16; q < cols; q++) {
            if (buffer[q - 16] == 'O') {
                grilla[w * cols + q] = 1;
            } else {
                grilla[w * cols + q] = 0;
            }
        }
        // dead code in the original (q == cols here, so the loop never runs);
        // kept for fidelity
        #pragma omp for schedule(auto)
        for (t = q; t < cols; t++) {
            grilla[w * cols + q] = 0;
        }
        if (!feof(fp)) {
            memset(buffer, '\0', cols);
            resultado = fgets(buffer, cols, fp);
        }
        w = w + 1;
    }

    // print the initial board
    for (s = 1; s < (rows - 1); s++) {
        for (r = 16; r < bordeFin - 1; r++) {
            if (grilla[s * cols + r]) {
                printf("%c", 'O');
            } else {
                printf("%c", '.');
            }
        }
        printf("%s\n", "");
    }
    // end print

    for (z = 0; z < juegos; z++) {
        /* Build the toroidal frame: corners, then side columns, then
         * top/bottom rows, copied from the opposite edges. */
        grilla[0 * cols + 15] = grilla[(rows - 2) * cols + (bordeFin - 2)];
        grilla[(rows - 1) * cols + (bordeFin - 1)] = grilla[1 * cols + 16];
        grilla[(rows - 1) * cols + 15] = grilla[1 * cols + (bordeFin - 2)];
        grilla[0 * cols + (bordeFin - 1)] = grilla[(rows - 2) * cols + 16];
        // NOTE: these "omp for" directives are orphaned (no enclosing
        // "omp parallel"), so they execute sequentially.
        #pragma omp for schedule(auto)
        for (l = 1; l < (rows - 1); l++) {
            grilla[l * cols + 15] = grilla[l * cols + (bordeFin - 2)];
            grilla[l * cols + (bordeFin - 1)] = grilla[l * cols + 16];
        }
        #pragma omp for schedule(auto)
        for (n = 16; n < (bordeFin - 1); n++) {
            grilla[0 * cols + n] = grilla[(rows - 2) * cols + n];
            grilla[(rows - 1) * cols + n] = grilla[1 * cols + n];
        }
        // end frame construction

        // BUGFIX: 'cols' removed from the private() clause -- a private copy
        // is UNINITIALIZED inside the construct, so 'i < (cols/16)' and every
        // index computation read garbage.  'cols' is read-only here and must
        // stay shared.
        #pragma omp for private(sumaAnte,sumaPost,v1,v2,v3,v4,v5,v6,v7,v8,i) schedule(auto)
        for (r = 1; r < rows - 1; r++) { // rows-1: skip the 2 frame rows
            for (i = 1; i < (cols / 16); i++) { // one 16-byte lane at a time
                // scalar sums for the cells just outside this 16-byte lane
                sumaAnte = grilla[r * cols + i * 16 - 1] + grilla[(r - 1) * cols + i * 16 - 1] + grilla[(r + 1) * cols + i * 16 - 1];
                sumaPost = grilla[r * cols + i * 16 + 16] + grilla[(r - 1) * cols + i * 16 + 16] + grilla[(r + 1) * cols + i * 16 + 16];
                v7 = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sumaAnte);
                v8 = _mm_set_epi8(sumaPost, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
                v1 = _mm_load_si128(((__m128i *)&grilla[r * cols]) + i);       // middle row
                v3 = _mm_load_si128(((__m128i *)&grilla[(r - 1) * cols]) + i); // row above
                v4 = _mm_load_si128(((__m128i *)&grilla[(r + 1) * cols]) + i); // row below
                v2 = _mm_adds_epi8(v1, v3);
                v2 = _mm_adds_epi8(v2, v4);        // column sums of the 3 rows
                v5 = _mm_srli_si128(v2, 1);        // shift left neighbour sums in
                v6 = _mm_slli_si128(v2, 1);        // shift right neighbour sums in
                v6 = _mm_or_si128(v7, v6);         // splice in sumaAnte at lane 0
                v5 = _mm_or_si128(v5, v8);         // splice in sumaPost at lane 15
                v2 = _mm_adds_epi8(v2, v5);
                v2 = _mm_adds_epi8(v2, v6);        // 3x3 block sums
                v2 = _mm_sub_epi8(v2, v1);         // subtract the cell itself -> 8 neighbours
                // life rule as (neighbours | alive) == 3:
                // 3 neighbours -> birth; alive with 2 neighbours -> 2|1 == 3
                v2 = _mm_or_si128(v2, v1);
                v9 = _mm_set1_epi8(3);
                v2 = _mm_cmpeq_epi8(v2, v9);       // 0xFF where the cell lives
                v9 = _mm_set1_epi8(1);
                v2 = _mm_and_si128(v2, v9);        // 0xFF -> 1, else 0
                _mm_store_si128(((__m128i *)&grilla2[r * cols]) + i, v2);
            }
        }

        /* swap the current and next generation buffers */
        grilla3 = grilla;
        grilla = grilla2;
        grilla2 = grilla3;
    }
    printf("%s\n", "");

    // print the final board
    for (s = 1; s < (rows - 1); s++) {
        for (r = 16; r < bordeFin - 1; r++) {
            if (grilla[s * cols + r]) {
                printf("%c", 'O');
            } else {
                printf("%c", '.');
            }
        }
        printf("%s\n", "");
    }
    // end print
    fclose(fp);
    return 0;
}
omp_alloc.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <stdint.h> #include <omp.h> #include "omp_testsuite.h" #define ARRAY_SIZE 10000 int test_omp_alloc() { int err; int i, j; int *shared_array; const omp_allocator_t *allocator; const omp_allocator_t *test_allocator; // Currently, only default memory allocator is implemented const omp_allocator_t *allocators[] = { omp_default_mem_alloc, }; err = 0; for (i = 0; i < sizeof(allocators) / sizeof(allocators[0]); ++i) { allocator = allocators[i]; printf("Using %p allocator\n", test_allocator); omp_set_default_allocator(allocator); test_allocator = omp_get_default_allocator(); if (test_allocator != allocator) { printf("error: omp_set|get_default_allocator() not working\n"); return 0; } shared_array = (int *)omp_alloc(sizeof(int) * ARRAY_SIZE, test_allocator); if (shared_array == NULL) { printf("error: shared_array is NULL\n"); return 0; } for (j = 0; j < ARRAY_SIZE; ++j) { shared_array[j] = j; } #pragma omp parallel shared(shared_array) { int i; int tid = omp_get_thread_num(); int *private_array = (int *)omp_alloc(sizeof(int) * ARRAY_SIZE, omp_default_mem_alloc); if (private_array == NULL) { printf("error: thread %d private_array is NULL\n", tid); #pragma omp atomic err++; } for (i = 0; i < ARRAY_SIZE; ++i) { private_array[i] = shared_array[i] + tid; } for (i = 0; i < ARRAY_SIZE; ++i) { if (private_array[i] != i + tid) { printf("error: thread %d element %d is %d instead of %d\n", tid, i, private_array[i], i + tid); #pragma omp atomic err++; } } omp_free(private_array, omp_default_mem_alloc); } /* end of parallel */ omp_free(shared_array, test_allocator); } return !err; } int main() { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_alloc()) { num_failed++; } } return num_failed; }
hello.c
#include <stdio.h>
#include <omp.h>

/* Classic OpenMP hello: the parallel region prints one greeting per thread. */
int main()
{
#pragma omp parallel
    {
        printf("Hello world!\n");
    }
    return 0;
}
reduce.h
#ifndef REDUCE_H #define REDUCE_H #include <dll.h> //#include <string> #include <helpers/sharedmem.h> #include <stdio.h> #include <helpers/shape.h> #ifdef _OPENMP #include <omp.h> #endif #include <templatemath.h> #include <helper_cuda.h> #include <nd4jmalloc.h> #include <pairwise_util.h> #include <ops/ops.h> #include <ops/special_accumulation_ops.h> #include <op_boilerplate.h> #pragma once #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #endif #ifndef _OPENMP #define omp_get_thread_num() 0 #define omp_get_max_threads() 1 #endif #include "legacy_ops.h" //an op for the kernel namespace functions { namespace reduce { /** * A reduce function * reduces a vector down to * a subset of itself * via aggregating member * elements. */ template<typename T> class ReduceFunction { public: #ifdef __CUDACC__ template<typename OpType> static inline __device__ void transformCuda1D(T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials;// = (T *)manager->getSharedReductionBuffer(); __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jIndex tadOffsetForBlock = tadOffsets[r]; T *rX = dx + tadOffsetForBlock; sPartials[threadIdx.x] = OpType::startingValue(rX); if (tadEWS >= 1) { 
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(rX[i * tadEWS], extraParams), extraParams); } } else { __shared__ int tadRank; __shared__ int *tadShape; __shared__ int *tadStride; int xCoord[MAX_RANK]; if (threadIdx.x == 0) { tadRank = shape::rank(tadOnlyShapeInfo); tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } } template<typename OpType> static inline __device__ void execScalarCuda( T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, T *reductionBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { int elementWiseStride = shape::elementWiseStride(xShapeInfo); int n = shape::length(xShapeInfo); int tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results T *sPartials = (T *)manager->getSharedReductionBuffer(); sPartials[threadIdx.x] = OpType::startingValue(dx); if (elementWiseStride >= 1) { for (int i = tid; i < n; i += (blockDim.x * gridDim.x)) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[i * elementWiseStride], extraParams), extraParams); } } else { __shared__ int rank; __shared__ int *xShape; __shared__ int *xStride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xShape = 
shape::shapeOf(xShapeInfo); xStride = shape::stride(xShapeInfo); } __syncthreads(); int ind2sub[MAX_RANK]; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { shape::ind2subC(rank, xShape, i, ind2sub); Nd4jIndex offset = shape::getOffset(0, xShape, xStride, ind2sub, rank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[offset], extraParams), extraParams); } } __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), extraParams); __syncthreads(); if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionBuffer; __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) { reductionBuffer[blockIdx.x] = sPartials[0];//this->postProcess(sPartials[0],n,extraParams); } __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(dx); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { result[0] = OpType::postProcess(sPartials[0], n, extraParams); } } } else { if (threadIdx.x == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; result[0] = OpType::postProcess(sPartials[0], n, extraParams); } } } /** * Kernel invocation for reduce * @param n the length of the buffer * @param dx the input * @param xShapeInfo the shape information for the input * @param extraParams extra parameters (starting value,..) * @param result the result buffer * @param resultShapeInfo the shapeinformation for the result buffer * @param gpuInformation the gpu information (shared memory allocated,..) 
* @param dimension the dimension to do reduce along long * @param dimensionLength the length of the dimension buffer * @param postProcessOrNot whether to reduce or not */ template<typename OpType> static inline __device__ void transformCuda3D( T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials; // = (T *)manager->getSharedReductionBuffer(); __shared__ int tadLength; __shared__ int tadRank; __shared__ int numTads; __shared__ int *tadShape; __shared__ int *tadStride; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); int xCoord[3]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jIndex tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(dx + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. 
do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } template<typename OpType> static inline __device__ void transformCudaXD( T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials; // __shared__ shape::TAD *tad; __shared__ int tadLength; __shared__ int tadRank; __shared__ int numTads; __shared__ int *tadShape; __shared__ int *tadStride; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); int xCoord[MAX_RANK]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jIndex tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(dx + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. 
do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } /** * * @param sPartialsRef * @param tid * @param extraParams */ template<typename OpType> __device__ static inline void aggregatePartials(T *sPartials, int tid, int numItems, T *extraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. int floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); } __syncthreads(); } for (int activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) { sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); } __syncthreads(); } } #endif /** * Reduce down to 1 number * @param x the input * @param xShapeInfo the shape information * for the input * @param extraParams the extra params * @return */ template<typename OpType> static _CUDA_H T execScalar(T *x, int *xShapeInfo, T *extraParams) { const Nd4jIndex length = shape::length(xShapeInfo); int xElementWiseStride = shape::elementWiseStride(xShapeInfo); if (xElementWiseStride >= 1) { return execScalar<OpType>(x, xElementWiseStride, length, extraParams); } else { int shapeIter[MAX_RANK]; int coord[MAX_RANK]; int dim; int xStridesIter[MAX_RANK]; int *xShape = shape::shapeOf(xShapeInfo); int *xStride = shape::stride(xShapeInfo); T start = OpType::startingValue(x); int rank = shape::rank(xShapeInfo); if (PrepareOneRawArrayIter<T>(rank, xShape, x, xStride, &rank, shapeIter, &x, 
xStridesIter) >= 0) { ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { /* Process the innermost dimension */ const T *xIter = x; start = OpType::update(start, OpType::op(xIter[0], extraParams), extraParams); } ND4J_RAW_ITER_ONE_NEXT(dim, rank, coord, shapeIter, x, xStridesIter); start = OpType::postProcess(start, shape::length(xShapeInfo), extraParams); } else { printf("Unable to prepare array\n"); } return start; } } static T execScalar(const int opNum, T *x, int *xShapeInfo, T *extraParams) { RETURNING_DISPATCH_BY_OPNUM(execScalar, PARAMS(x, xShapeInfo, extraParams), REDUCE_OPS); } static void exec(const int opNum, T *x, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffset) { DISPATCH_BY_OPNUM(exec, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfoBuffer, dimension, dimensionLength, tadShapeInfo, tadOffset), REDUCE_OPS); } /** * Execute on the cpu * @param x the input data * @param xShapeInfo the shape information for x * @param extraParams the extra parameters * @param result the result buffer * @param resultShapeInfoBuffer the shape information * @param dimension the dimension to perform * the reduce along long * @param dimensionLength the length of the dimension buffer */ template<typename OpType> static void _CUDA_H exec(T *x, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength, int *tadShapeInfo, Nd4jIndex *tadOffset) { int resultLength = shape::length(resultShapeInfoBuffer); //pre squeezed: this is for keeping the pointer to the original //shape information for tad offset //the squeezed information doesn't render the right strides for //tad offset // || tad.wholeThing if (resultLength == 1 || dimension == nullptr || dimensionLength == shape::rank(xShapeInfo)) { result[0] = execScalar<OpType>(x, xShapeInfo, extraParams); return; } if (OpType::requiresSpecialAccumulation) { OpType::execSpecial(x, 
xShapeInfo, extraParams, result, resultShapeInfoBuffer, dimension, dimensionLength, tadShapeInfo, tadOffset); return; } int *tadOnlyShapeInfo = tadShapeInfo; Nd4jIndex *tadOffsets = tadOffset; shape::TAD *tad = nullptr; if (tadOnlyShapeInfo == nullptr || tadOffsets == nullptr) { tad = new shape::TAD(xShapeInfo, dimension, dimensionLength); tad->createTadOnlyShapeInfo(); tad->createOffsets(); if (tad->dimensionLength < 1) { delete tad; return; } tadOnlyShapeInfo = tad->tadOnlyShapeInfo; tadOffsets = tad->tadOffsets; } const int tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); int numTads = shape::length(xShapeInfo) / tadLength; int tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); int tadsPerThread = resultLength / TAD_THRESHOLD; int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread); num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads()); if (tadEWS > 0 && (numTads == 1 || shape::isVector(tadOnlyShapeInfo) || shape::isScalar(tadOnlyShapeInfo))) { #pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) for (int i = 0; i < resultLength; i++) { T *iter = x + tadOffsets[i]; T start = OpType::startingValue(iter); if (tadEWS == 1) { // FIXME: proper reduction should be used here for (int j = 0; j < tadLength; j++) { start = OpType::update(start, OpType::op(iter[j], extraParams), extraParams); } } else { // FIXME: proper reduction to be used here for (int j = 0; j < tadLength; j++) { start = OpType::update(start, OpType::op(iter[j * tadEWS], extraParams), extraParams); } } result[i] = OpType::postProcess(start, tadLength, extraParams); } } else { int *tadShape = shape::shapeOf(tadOnlyShapeInfo); int *tadStride = shape::stride(tadOnlyShapeInfo); int tadRank = shape::rank(tadOnlyShapeInfo); #pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) for (int i = 0; i < resultLength; i++) { 
Nd4jIndex offset = tadOffsets[i]; int xCoord[MAX_RANK]; T start = OpType::startingValue(x + offset); for (int j = 0; j < tadLength; j++) { shape::ind2subC(tadRank, tadShape, j, xCoord); Nd4jIndex xOffset = shape::getOffset(offset, tadShape, tadStride, xCoord, tadRank); start = OpType::update(start, OpType::op(x[xOffset], extraParams), extraParams); } result[i] = OpType::postProcess(start, tadLength, extraParams);; } } if (tad != nullptr) delete tad; } /** * CPU implementation * @param x the input data * @param xShapeInfo the shape information for * the input data * @param extraParams the extra parameters for the problem * @param result the result buffer * @param resultShapeInfo the shape information */ template<typename OpType> static void _CUDA_H exec(T *x, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo) { return execScalar<OpType>(x, xShapeInfo, extraParams); } /** * Reduce down to 1 number * @param x the input * @param xShapeInfo the shape information * for the input * @param extraParams the extra params * @return */ template<typename OpType> static T _CUDA_H execScalar(const T *x, int xElementWiseStride, Nd4jIndex length, T *extraParams) { T startingVal = OpType::startingValue(x); if (xElementWiseStride == 1) { if (length < ELEMENT_THRESHOLD) { T local = OpType::startingValue(x); // FIXME: proper reduction to be used here for (Nd4jIndex i = 0; i < length; i++) { T curr = OpType::op(x[i], extraParams); local = OpType::update(local, curr, extraParams); } local = OpType::postProcess(local, length, extraParams); return local; } else { T finalVal = startingVal; BlockInformation info(length, ELEMENT_THRESHOLD); T *blocks = new T[info.threads]; #pragma omp parallel num_threads(info.threads) if (info.threads > 1) proc_bind(AFFINITY) default(shared) { T local = OpType::startingValue(x); for (int i = omp_get_thread_num(); i < info.chunks; i += info.threads) { Nd4jIndex newOffset = (i * info.items); const T *chunk = x + newOffset; Nd4jIndex itemsToLoop = 
info.items; if (i * info.items >= length) { break; } //handle modulo case if (newOffset + info.items >= length) { itemsToLoop = length - newOffset; } // FIXME: proper reduction should be used here for (Nd4jIndex j = 0; j < itemsToLoop && i * info.items + j < length; j++) { T curr = OpType::op(chunk[j], extraParams); local = OpType::update(local, curr, extraParams); } } blocks[omp_get_thread_num()] = local; } // FIXME: proper reduction should be used here for (int i = 0; i < info.threads; i++) { finalVal = OpType::update(finalVal, blocks[i], extraParams); } finalVal = OpType::postProcess(finalVal, length, extraParams); delete[] blocks; return finalVal; } } else { if (length < ELEMENT_THRESHOLD) { T local = OpType::startingValue(x); // FIXME: proper reduction should be used here for (Nd4jIndex i = 0; i < length; i++) { T curr = OpType::op(x[i * xElementWiseStride], extraParams); local = OpType::update(local, curr, extraParams); } local = OpType::postProcess(local, length, extraParams); return local; } T finalVal = startingVal; BlockInformation info(length, ELEMENT_THRESHOLD); T *blocks = new T[info.threads]; #pragma omp parallel num_threads(info.threads) if (info.threads > 1) proc_bind(AFFINITY) default(shared) { T local = OpType::startingValue(x); for (int i = omp_get_thread_num(); i < info.chunks; i += info.threads) { Nd4jIndex newOffset = (i * info.items) * xElementWiseStride; const T *chunk = x + newOffset; Nd4jIndex itemsToLoop = info.items; if (i * info.items >= length) break; // FIXME: proper reduction should be used here for (Nd4jIndex j = 0; j < itemsToLoop && i * info.items + j < length; j++) { T curr = OpType::op(chunk[j * xElementWiseStride], extraParams); local = OpType::update(local, curr, extraParams); } } blocks[omp_get_thread_num()] = local; } // FIXME: proper reduction should be used here for (int i = 0; i < info.threads; i++) { finalVal = OpType::update(finalVal, blocks[i], extraParams); } finalVal = OpType::postProcess(finalVal, length, 
extraParams); delete[] blocks; return finalVal; } } }; #ifdef __CUDACC__ /** * * @param extraParams * @param sPartials * @param sMemSize */ template<typename T> __device__ void initializeShared(T *extraParams, T **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(T); T *sPartialsDeref = (T *) *sPartials; for (int i = 0; i < sPartialsLength; i++) { sPartialsDeref[i] = extraParams[0]; } } #endif } } #ifdef __CUDACC__ /** * Interface for the c and driver api * @param op the operation number * @param n the length of the problem * @param dx the input information * @param xShapeInfo the shape information * @param extraParams the extra parameters * @param result the result data * @param resultShapeInfo the result shape information * @param gpuInformation the gpu information * @param dimension the dimension to do reduce along long * @param dimensionLength the length of the dimension buffer * @param postProcessOrNot whether to pre process or not */ template <typename T, typename OpClass> __device__ void reduceSimpleGeneric( T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce::ReduceFunction<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); } __syncthreads(); functions::reduce::ReduceFunction<T>::template transformCudaXD<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceSimpleGeneric1D( T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, int *tadOnlyShapeInfo, 
Nd4jIndex *tadOffsets) { functions::reduce::ReduceFunction<T>::template transformCuda1D<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, nullptr, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceSimpleGeneric3D( T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, int *tadOnlyShapeInfo, Nd4jIndex *tadOffsets) { functions::reduce::ReduceFunction<T>::template transformCuda3D<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, nullptr, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceScalarGeneric( T *dx, int *xShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, int *tadOnlyShapeInfo) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce::ReduceFunction<T>), sizeof(shape::TAD), 0); } __syncthreads(); functions::reduce::ReduceFunction<T>::template execScalarCuda<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, reductionBuffer, manager, tadOnlyShapeInfo); }; /* */ // reduceScalar DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, float, INPUT(float *x, int *xShapeInfo, float *extraParams, float *z, int *zShapeInfo, int *dimension, int dimensionLength, float *reductionBuffer, int *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, double, INPUT(double *x, int *xShapeInfo, double *extraParams, double *z, int *zShapeInfo, int *dimension, int dimensionLength, 
double *reductionBuffer, int *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, float16, INPUT(float16 *x, int *xShapeInfo, float16 *extraParams, float16 *z, int *zShapeInfo, int *dimension, int dimensionLength, float16 *reductionBuffer, int *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) // reduce1D DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, float, INPUT(float *x, int *xShape, float *extraParams, float *z, int *zShape, int *dimension, int dimensionLength, float *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, double, INPUT(double *x, int *xShape, double *extraParams, double *z, int *zShape, int *dimension, int dimensionLength, double *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, float16, INPUT(float16 *x, int *xShape, float16 *extraParams, float16 *z, int *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) // reduce3D DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, float, INPUT(float *x, int *xShape, float *extraParams, float *z, int *zShape, int *dimension, int dimensionLength, float *reductionPointer, int *tadShapeInfo, 
Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, double, INPUT(double *x, int *xShape, double *extraParams, double *z, int *zShape, int *dimension, int dimensionLength, double *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, float16, INPUT(float16 *x, int *xShape, float16 *extraParams, float16 *z, int *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) // reduceXD DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, float, INPUT(float *x, int *xShape, float *extraParams, float *z, int *zShape, int *dimension, int dimensionLength, float *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, double, INPUT(double *x, int *xShape, double *extraParams, double *z, int *zShape, int *dimension, int dimensionLength, double *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, float16, INPUT(float16 *x, int *xShape, float16 *extraParams, float16 *z, int *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), 
PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) #endif #endif
JeeIOrbitalSoA.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory ////////////////////////////////////////////////////////////////////////////////////// #ifndef QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H #define QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H #include "Configuration.h" #if QMC_BUILD_LEVEL<5 #include "QMCWaveFunctions/WaveFunctionComponent.h" #endif #include "Particle/DistanceTableData.h" #include <simd/allocator.hpp> #include <simd/algorithm.hpp> #include <map> #include <numeric> namespace qmcplusplus { /** @ingroup WaveFunctionComponent * @brief Specialization for three-body Jastrow function using multiple functors * *Each pair-type can have distinct function \f$u(r_{ij})\f$. *For electrons, distinct pair correlation functions are used *for spins up-up/down-down and up-down/down-up. 
*/
template<class FT>
class JeeIOrbitalSoA: public WaveFunctionComponent
{
  ///real value type of each component U, dU, d2U
  using valT=typename FT::real_type;
  ///element position type
  using posT=TinyVector<valT,OHMMS_DIM>;
  ///use the same row container as the distance tables
  using RowContainer=DistanceTableData::RowContainer;
  ///table index for i-el; the el-el table is always index zero
  int myTableID;
  //number of electrons and ions
  int Nelec, Nion;
  ///number of electrons, padded for SIMD alignment
  size_t Nelec_padded;
  //number of species groups of the target (electron) and source (ion) particlesets
  int eGroups, iGroups;
  ///reference to the sources (ions)
  const ParticleSet& Ions;
  ///difference of U between current and proposed position during a move
  RealType DiffVal;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$; oldUk/newUk hold per-electron contributions before/after a move
  Vector<valT> Uat,oldUk,newUk;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  using gContainer_type=VectorSoaContainer<valT,OHMMS_DIM>;
  gContainer_type dUat,olddUk,newdUk;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat,oldd2Uk,newd2Uk;
  /// current values during particle-by-particle (PbyP) moves
  valT cur_Uat,cur_d2Uat;
  posT cur_dUat, dUat_temp;
  ///container for the Jastrow functors, indexed by (ion group, e group, e group)
  Array<FT*,3> F;
  ///unique functors, keyed by the "iSpecies_eSpecies1_eSpecies2" string built in addFunc
  std::map<std::string,FT*> J3Unique;
  ///position of each unique functor inside the du_dalpha/dgrad_dalpha/dhess_dalpha work arrays
  std::map<FT*,int> J3UniqueIndex;
  /// the cutoff for e-I pairs
  std::vector<valT> Ion_cutoff;
  /// the electrons around ions within the cutoff radius, grouped by species
  Array<std::vector<int>,2> elecs_inside;
  /// e-I distances and displacements matching the elecs_inside entries
  Array<std::vector<valT>,2> elecs_inside_dist;
  Array<std::vector<posT>,2> elecs_inside_displ;
  /// the ids of ions within the cutoff radius of an electron on which a move is proposed
  std::vector<int> ions_nearby_old, ions_nearby_new;
  /// work buffer size
  size_t Nbuffer;
  /// compressed distances gathered for vectorized functor evaluation
  aligned_vector<valT> Distjk_Compressed, DistkI_Compressed, DistjI_Compressed;
  /// electron indices matching the compressed buffers
  std::vector<int> DistIndice_k;
  /// compressed displacements
  gContainer_type Disp_jk_Compressed, Disp_jI_Compressed, Disp_kI_Compressed;
  /// work result buffer: value, 3 gradient and 5 Hessian components per entry (see computeU3_engine)
  VectorSoaContainer<valT,9> mVGL;
  // Used for evaluating derivatives with respect to the optimizable parameters
  int NumVars;
  Array<std::pair<int,int>,3> VarOffset;
  Vector<RealType> dLogPsi;
  Array<PosType,2> gradLogPsi;
Array<RealType,2> lapLogPsi; // Temporary store for parameter derivatives of functor // The first index is the functor index in J3Unique. The second is the parameter index w.r.t. to that // functor std::vector<std::vector<RealType> > du_dalpha; std::vector<std::vector<PosType> > dgrad_dalpha; std::vector<std::vector<Tensor<RealType,3> > > dhess_dalpha; public: ///alias FuncType using FuncType=FT; JeeIOrbitalSoA(const ParticleSet& ions, ParticleSet& elecs, bool is_master=false) : Ions(ions), NumVars(0) { OrbitalName = "JeeIOrbitalSoA"; myTableID=elecs.addTable(Ions,DT_SOA); elecs.DistTables[myTableID]->Need_full_table_loadWalker=true; init(elecs); } ~JeeIOrbitalSoA() { } WaveFunctionComponentPtr makeClone(ParticleSet& elecs) const { JeeIOrbitalSoA<FT>* eeIcopy= new JeeIOrbitalSoA<FT>(Ions, elecs, false); std::map<const FT*,FT*> fcmap; for (int iG=0; iG<iGroups; iG++) for (int eG1=0; eG1<eGroups; eG1++) for (int eG2=0; eG2<eGroups; eG2++) { if(F(iG,eG1,eG2)==0) continue; typename std::map<const FT*,FT*>::iterator fit=fcmap.find(F(iG,eG1,eG2)); if(fit == fcmap.end()) { FT* fc=new FT(*F(iG,eG1,eG2)); eeIcopy->addFunc(iG, eG1, eG2, fc); fcmap[F(iG,eG1,eG2)]=fc; } } // Ye: I don't like the following memory allocated by default. 
eeIcopy->myVars.clear(); eeIcopy->myVars.insertFrom(myVars); eeIcopy->NumVars=NumVars; eeIcopy->dLogPsi.resize(NumVars); eeIcopy->gradLogPsi.resize(NumVars,Nelec); eeIcopy->lapLogPsi.resize(NumVars,Nelec); eeIcopy->VarOffset=VarOffset; eeIcopy->Optimizable = Optimizable; return eeIcopy; } void init(ParticleSet& p) { Nelec=p.getTotalNum(); Nelec_padded=getAlignedSize<valT>(Nelec); Nion = Ions.getTotalNum(); iGroups=Ions.getSpeciesSet().getTotalNum(); eGroups=p.groups(); Uat.resize(Nelec); dUat.resize(Nelec); d2Uat.resize(Nelec); oldUk.resize(Nelec); olddUk.resize(Nelec); oldd2Uk.resize(Nelec); newUk.resize(Nelec); newdUk.resize(Nelec); newd2Uk.resize(Nelec); F.resize(iGroups,eGroups,eGroups); F=nullptr; elecs_inside.resize(eGroups,Nion); elecs_inside_dist.resize(eGroups,Nion); elecs_inside_displ.resize(eGroups,Nion); ions_nearby_old.resize(Nion); ions_nearby_new.resize(Nion); Ion_cutoff.resize(Nion, 0.0); //initialize buffers Nbuffer=Nelec; mVGL.resize(Nbuffer); Distjk_Compressed.resize(Nbuffer); DistjI_Compressed.resize(Nbuffer); DistkI_Compressed.resize(Nbuffer); Disp_jk_Compressed.resize(Nbuffer); Disp_jI_Compressed.resize(Nbuffer); Disp_kI_Compressed.resize(Nbuffer); DistIndice_k.resize(Nbuffer); } void initUnique() { typename std::map<std::string,FT*>::iterator it(J3Unique.begin()),it_end(J3Unique.end()); du_dalpha.resize(J3Unique.size()); dgrad_dalpha.resize(J3Unique.size()); dhess_dalpha.resize(J3Unique.size()); int ifunc=0; while(it != it_end) { J3UniqueIndex[it->second]=ifunc; FT &functor = *(it->second); int numParams = functor.getNumParameters(); du_dalpha[ifunc].resize(numParams); dgrad_dalpha[ifunc].resize(numParams); dhess_dalpha[ifunc].resize(numParams); ++it; ifunc++; } } void addFunc(int iSpecies, int eSpecies1, int eSpecies2, FT* j) { if(eSpecies1==eSpecies2) { //if only up-up is specified, assume spin-unpolarized correlations if(eSpecies1==0) for (int eG1=0; eG1<eGroups; eG1++) for (int eG2=0; eG2<eGroups; eG2++) { if(F(iSpecies,eG1,eG2)==0) 
F(iSpecies,eG1,eG2)=j; } } else { F(iSpecies,eSpecies1,eSpecies2) = j; F(iSpecies,eSpecies2,eSpecies1) = j; } if(j) { RealType rcut = 0.5 * j->cutoff_radius; for (int i=0; i<Nion; i++) if (Ions.GroupID[i] == iSpecies) Ion_cutoff[i] = rcut; } else { APP_ABORT("JeeIOrbitalSoA::addFunc Jastrow function pointer is NULL"); } std::stringstream aname; aname << iSpecies << "_" << eSpecies1 << "_" << eSpecies2; J3Unique[aname.str()]=j; initUnique(); } /** check that correlation information is complete */ void check_complete() { //check that correlation pointers are either all 0 or all assigned bool complete = true; for(int i=0; i<iGroups; ++i) { int nfilled = 0; bool partial; for(int e1=0; e1<eGroups; ++e1) for(int e2=0; e2<eGroups; ++e2) if(F(i,e1,e2)!=0) nfilled++; partial = nfilled>0 && nfilled<eGroups*eGroups; if(partial) app_log() << "J3 eeI is missing correlation for ion "<<i<< std::endl; complete = complete && !partial; } if(!complete) { APP_ABORT("JeeIOrbitalSoA::check_complete J3 eeI is missing correlation components\n see preceding messages for details"); } //first set radii for(int i=0; i<Nion; ++i) { FT* f = F(Ions.GroupID[i],0,0); if(f!=0) Ion_cutoff[i] = .5*f->cutoff_radius; } //then check radii bool all_radii_match = true; for(int i=0; i<iGroups; ++i) { if(F(i,0,0)!=0) { bool radii_match = true; RealType rcut = F(i,0,0)->cutoff_radius; for(int e1=0; e1<eGroups; ++e1) for(int e2=0; e2<eGroups; ++e2) radii_match = radii_match && F(i,e1,e2)->cutoff_radius==rcut; if(!radii_match) app_log() << "eeI functors for ion species " << i << " have different radii"<< std::endl; all_radii_match = all_radii_match && radii_match; } } if(!all_radii_match) { APP_ABORT("JeeIOrbitalSoA::check_radii J3 eeI are inconsistent for some ion species\n see preceding messages for details"); } } //evaluate the distance table with els void resetTargetParticleSet(ParticleSet& P) {} /** check in an optimizable parameter * @param o a super set of optimizable variables */ void 
checkInVariables(opt_variables_type& active)
{
  // Register every unique functor's optimizable parameters with the global
  // active list, and mirror them into this component's own variable set.
  myVars.clear();
  typename std::map<std::string,FT*>::iterator it(J3Unique.begin()),it_end(J3Unique.end());
  while(it != it_end)
  {
    (*it).second->checkInVariables(active);
    (*it).second->checkInVariables(myVars);
    ++it;
  }
}

/** check out optimizable variables
 *
 * Resolves the indices of this component's variables inside the active set
 * and, when variables exist, sizes the derivative work arrays and fills
 * VarOffset(ig,jg,kg) with the [first,second) parameter range of each
 * functor relative to this component's first variable index.
 */
void checkOutVariables(const opt_variables_type& active)
{
  myVars.clear();
  typename std::map<std::string,FT*>::iterator it(J3Unique.begin()),it_end(J3Unique.end());
  while (it != it_end)
  {
    (*it).second->myVars.getIndex(active);
    myVars.insertFrom((*it).second->myVars);
    ++it;
  }
  myVars.getIndex(active);
  NumVars=myVars.size();
  if (NumVars)
  {
    dLogPsi.resize(NumVars);
    gradLogPsi.resize(NumVars,Nelec);
    lapLogPsi.resize(NumVars,Nelec);
    VarOffset.resize(iGroups, eGroups, eGroups);
    int varoffset=myVars.Index[0];
    for (int ig=0; ig<iGroups; ig++)
      for (int jg=0; jg<eGroups; jg++)
        for (int kg=0; kg<eGroups; kg++)
        {
          FT *func_ijk = F(ig, jg, kg);
          if(func_ijk==nullptr)
            continue;
          VarOffset(ig,jg,kg).first = func_ijk->myVars.Index.front()-varoffset;
          VarOffset(ig,jg,kg).second = func_ijk->myVars.Index.size()+VarOffset(ig,jg,kg).first;
        }
  }
}

///reset the value of all the unique three-body Jastrow functors
///(original comment said "Two-Body"; this class is the e-e-I Jastrow)
void resetParameters(const opt_variables_type& active)
{
  if(!Optimizable)
    return;
  typename std::map<std::string,FT*>::iterator it(J3Unique.begin()),it_end(J3Unique.end());
  while(it != it_end)
  {
    (*it++).second->resetParameters(active);
  }
  // refresh the locally cached values of the active variables
  for(int i=0; i<myVars.size(); ++i)
  {
    int ii=myVars.Index[i];
    if(ii>=0)
      myVars[i]= active[ii];
  }
}

/** print the state, e.g., optimizables */
void reportStatus(std::ostream& os)
{
  typename std::map<std::string,FT*>::iterator it(J3Unique.begin()),it_end(J3Unique.end());
  while(it != it_end)
  {
    (*it).second->myVars.print(os);
    ++it;
  }
}

/** rebuild the per-ion compact lists of electrons within each ion's cutoff,
 *  together with the matching e-I distances and displacements
 */
void build_compact_list(ParticleSet& P)
{
  const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
  // clear the existing lists before repopulating
  for(int iat=0; iat<Nion; ++iat)
    for(int jg=0; jg<eGroups; ++jg)
    {
      elecs_inside(jg,iat).clear();
elecs_inside_dist(jg,iat).clear(); elecs_inside_displ(jg,iat).clear(); } for(int jg=0; jg<eGroups; ++jg) for(int jel=P.first(jg); jel<P.last(jg); jel++) for(int iat=0; iat<Nion; ++iat) if(eI_table.Distances[jel][iat]<Ion_cutoff[iat]) { elecs_inside(jg,iat).push_back(jel); elecs_inside_dist(jg,iat).push_back(eI_table.Distances[jel][iat]); elecs_inside_displ(jg,iat).push_back(eI_table.Displacements[jel][iat]); } } RealType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L) { evaluateGL(P,G,L,true); return LogValue; } ValueType ratio(ParticleSet& P, int iat) { UpdateMode=ORB_PBYP_RATIO; const DistanceTableData& eI_table=(*P.DistTables[myTableID]); const DistanceTableData& ee_table=(*P.DistTables[0]); cur_Uat=computeU(P, iat, P.GroupID[iat], eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new); DiffVal=Uat[iat]-cur_Uat; return std::exp(DiffVal); } void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios) { for(int k=0; k<ratios.size(); ++k) ratios[k]=std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.refPS.GroupID[VP.refPtcl], VP.DistTables[myTableID]->Distances[k], VP.DistTables[0]->Distances[k], ions_nearby_old)); } void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios) { const DistanceTableData* d_table=P.DistTables[0]; const DistanceTableData& eI_table=(*P.DistTables[myTableID]); const DistanceTableData& ee_table=(*P.DistTables[0]); for(int jg=0; jg<eGroups; ++jg) { const valT sumU=computeU(P, -1, jg, eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new); for(int j=P.first(jg); j<P.last(jg); ++j) { // remove self-interaction valT Uself(0); for(int iat=0; iat<Nion; ++iat) { const valT &r_Ij = eI_table.Temp_r[iat]; const valT &r_Ik = eI_table.Distances[j][iat]; if(r_Ij<Ion_cutoff[iat]&&r_Ik<Ion_cutoff[iat]) { const int ig=Ions.GroupID[iat]; Uself+=F(ig,jg,jg)->evaluate(ee_table.Temp_r[j],r_Ij,r_Ik); } } ratios[j]=std::exp(Uat[j]+Uself-sumU); } } } 
GradType evalGrad(ParticleSet& P, int iat) { return GradType(dUat[iat]); } ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat) { UpdateMode=ORB_PBYP_PARTIAL; const DistanceTableData& eI_table=(*P.DistTables[myTableID]); const DistanceTableData& ee_table=(*P.DistTables[0]); computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr, cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new); DiffVal=Uat[iat]-cur_Uat; grad_iat+=cur_dUat; return std::exp(DiffVal); } inline void restore(int iat) {} void acceptMove(ParticleSet& P, int iat) { const DistanceTableData& eI_table=(*P.DistTables[myTableID]); const DistanceTableData& ee_table=(*P.DistTables[0]); // get the old value, grad, lapl computeU3(P, iat, eI_table.Distances[iat], eI_table.Displacements[iat], ee_table.Distances[iat], ee_table.Displacements[iat], Uat[iat], dUat_temp, d2Uat[iat], oldUk, olddUk, oldd2Uk, ions_nearby_old); if(UpdateMode == ORB_PBYP_RATIO) {//ratio-only during the move; need to compute derivatives computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr, cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new); } #pragma omp simd for(int jel=0; jel<Nelec; jel++) { Uat[jel] += newUk[jel]-oldUk[jel]; d2Uat[jel] += newd2Uk[jel]-oldd2Uk[jel]; } for(int idim=0; idim<OHMMS_DIM; ++idim) { valT* restrict save_g=dUat.data(idim); const valT* restrict new_g=newdUk.data(idim); const valT* restrict old_g=olddUk.data(idim); #pragma omp simd aligned(save_g,new_g,old_g) for(int jel=0; jel<Nelec; jel++) save_g[jel]+=new_g[jel]-old_g[jel]; } LogValue += Uat[iat]-cur_Uat; Uat[iat] = cur_Uat; dUat(iat) = cur_dUat; d2Uat[iat] = cur_d2Uat; const int ig = P.GroupID[iat]; // update compact list elecs_inside // if the old position exists in elecs_inside for (int iind=0; iind<ions_nearby_old.size(); iind++) { int jat=ions_nearby_old[iind]; auto iter = find(elecs_inside(ig,jat).begin(), 
elecs_inside(ig,jat).end(), iat); auto iter_dist = elecs_inside_dist(ig,jat).begin()+std::distance(elecs_inside(ig,jat).begin(),iter); auto iter_displ = elecs_inside_displ(ig,jat).begin()+std::distance(elecs_inside(ig,jat).begin(),iter); if(eI_table.Temp_r[jat] < Ion_cutoff[jat]) // the new position is still inside { *iter_dist = eI_table.Temp_r[jat]; *iter_displ = eI_table.Temp_dr[jat]; *std::find(ions_nearby_new.begin(), ions_nearby_new.end(), jat) = -1; } else { *iter = elecs_inside(ig,jat).back(); elecs_inside(ig,jat).pop_back(); *iter_dist = elecs_inside_dist(ig,jat).back(); elecs_inside_dist(ig,jat).pop_back(); *iter_displ = elecs_inside_displ(ig,jat).back(); elecs_inside_displ(ig,jat).pop_back(); } } // if the old position doesn't exist in elecs_inside but the new position do for (int iind=0; iind<ions_nearby_new.size(); iind++) { int jat=ions_nearby_new[iind]; if(jat>=0) { elecs_inside(ig,jat).push_back(iat); elecs_inside_dist(ig,jat).push_back(eI_table.Temp_r[jat]); elecs_inside_displ(ig,jat).push_back(eI_table.Temp_dr[jat]); } } } inline void recompute(ParticleSet& P) { const DistanceTableData& eI_table=(*P.DistTables[myTableID]); const DistanceTableData& ee_table=(*P.DistTables[0]); build_compact_list(P); for(int jel=0; jel<Nelec; ++jel) { computeU3(P, jel, eI_table.Distances[jel], eI_table.Displacements[jel], ee_table.Distances[jel], ee_table.Displacements[jel], Uat[jel], dUat_temp, d2Uat[jel], newUk, newdUk, newd2Uk, ions_nearby_new, true); dUat(jel) = dUat_temp; // add the contribution from the upper triangle #pragma omp simd for(int kel=0; kel<jel; kel++) { Uat[kel] += newUk[kel]; d2Uat[kel] += newd2Uk[kel]; } for(int idim=0; idim<OHMMS_DIM; ++idim) { valT* restrict save_g=dUat.data(idim); const valT* restrict new_g=newdUk.data(idim); #pragma omp simd aligned(save_g,new_g) for(int kel=0; kel<jel; kel++) save_g[kel]+=new_g[kel]; } } } inline valT computeU(const ParticleSet& P, int jel, int jg, const RealType* distjI, const RealType* distjk, 
std::vector<int>& ions_nearby) { const DistanceTableData& eI_table=(*P.DistTables[myTableID]); ions_nearby.clear(); for(int iat=0; iat<Nion; ++iat) if(distjI[iat]<Ion_cutoff[iat]) ions_nearby.push_back(iat); valT Uj = valT(0); for(int kg=0; kg<eGroups; ++kg) { int kel_counter = 0; for(int iind=0; iind<ions_nearby.size(); ++iind) { const int iat = ions_nearby[iind]; const int ig = Ions.GroupID[iat]; const valT r_jI = distjI[iat]; for(int kind=0; kind<elecs_inside(kg,iat).size(); kind++) { const int kel=elecs_inside(kg,iat)[kind]; if(kel!=jel) { DistkI_Compressed[kel_counter]=elecs_inside_dist(kg,iat)[kind]; Distjk_Compressed[kel_counter]=distjk[kel]; DistjI_Compressed[kel_counter]=r_jI; kel_counter++; if(kel_counter==Nbuffer) { const FT& feeI(*F(ig,jg,kg)); Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data()); kel_counter = 0; } } } if((iind+1==ions_nearby.size() || ig!=Ions.GroupID[ions_nearby[iind+1]]) && kel_counter>0) { const FT& feeI(*F(ig,jg,kg)); Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data()); kel_counter = 0; } } } return Uj; } inline void computeU3_engine(const ParticleSet& P, const FT &feeI, int kel_counter, valT& Uj, posT& dUj, valT& d2Uj, Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk) { const DistanceTableData& eI_table=(*P.DistTables[myTableID]); constexpr valT czero(0); constexpr valT cone(1); constexpr valT ctwo(2); constexpr valT lapfac=OHMMS_DIM-cone; valT* restrict val=mVGL.data(0); valT* restrict gradF0=mVGL.data(1); valT* restrict gradF1=mVGL.data(2); valT* restrict gradF2=mVGL.data(3); valT* restrict hessF00=mVGL.data(4); valT* restrict hessF11=mVGL.data(5); valT* restrict hessF22=mVGL.data(6); valT* restrict hessF01=mVGL.data(7); valT* restrict hessF02=mVGL.data(8); feeI.evaluateVGL(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data(), val, gradF0, gradF1, gradF2, hessF00, 
hessF11, hessF22, hessF01, hessF02); // compute the contribution to jel, kel Uj=simd::accumulate_n(val,kel_counter,Uj); valT gradF0_sum=simd::accumulate_n(gradF0,kel_counter,czero); valT gradF1_sum=simd::accumulate_n(gradF1,kel_counter,czero); valT hessF00_sum=simd::accumulate_n(hessF00,kel_counter,czero); valT hessF11_sum=simd::accumulate_n(hessF11,kel_counter,czero); d2Uj-=hessF00_sum+hessF11_sum+lapfac*(gradF0_sum+gradF1_sum); std::fill_n(hessF11,kel_counter,czero); for(int idim=0; idim<OHMMS_DIM; ++idim) { valT *restrict jk = Disp_jk_Compressed.data(idim); valT *restrict jI = Disp_jI_Compressed.data(idim); valT *restrict kI = Disp_kI_Compressed.data(idim); valT dUj_x(0); #pragma omp simd aligned(gradF0,gradF1,gradF2,hessF11,jk,jI,kI) reduction(+:dUj_x) for(int kel_index=0; kel_index<kel_counter; kel_index++) { // recycle hessF11 hessF11[kel_index] += kI[kel_index] * jk[kel_index]; dUj_x += gradF1[kel_index] * jI[kel_index]; // destroy jk, kI const valT temp = jk[kel_index] * gradF0[kel_index]; dUj_x += temp; jk[kel_index] *= jI[kel_index]; kI[kel_index] = kI[kel_index] * gradF2[kel_index] - temp; } dUj[idim] += dUj_x; valT *restrict jk0 = Disp_jk_Compressed.data(0); if(idim>0) { #pragma omp simd aligned(jk,jk0) for(int kel_index=0; kel_index<kel_counter; kel_index++) jk0[kel_index] += jk[kel_index]; } valT *restrict dUk_x = dUk.data(idim); for(int kel_index=0; kel_index<kel_counter; kel_index++) dUk_x[DistIndice_k[kel_index]] += kI[kel_index]; } valT sum(0); valT *restrict jk0 = Disp_jk_Compressed.data(0); #pragma omp simd aligned(jk0,hessF01) reduction(+:sum) for(int kel_index=0; kel_index<kel_counter; kel_index++) sum += hessF01[kel_index] * jk0[kel_index]; d2Uj -= ctwo * sum; #pragma omp simd aligned(hessF00,hessF22,gradF0,gradF2,hessF02,hessF11) for(int kel_index=0; kel_index<kel_counter; kel_index++) hessF00[kel_index] = hessF00[kel_index] + hessF22[kel_index] + lapfac*(gradF0[kel_index] + gradF2[kel_index]) - ctwo*hessF02[kel_index] * hessF11[kel_index]; 
for(int kel_index=0; kel_index<kel_counter; kel_index++) { const int kel=DistIndice_k[kel_index]; Uk[kel] += val[kel_index]; d2Uk[kel] -= hessF00[kel_index]; } } inline void computeU3(const ParticleSet& P, int jel, const RealType* distjI, const RowContainer& displjI, const RealType* distjk, const RowContainer& displjk, valT& Uj, posT& dUj, valT& d2Uj, Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk, std::vector<int>& ions_nearby, bool triangle=false) { constexpr valT czero(0); Uj = czero; dUj = posT(); d2Uj = czero; const int jg=P.GroupID[jel]; const int kelmax=triangle?jel:Nelec; std::fill_n(Uk.data(),kelmax,czero); std::fill_n(d2Uk.data(),kelmax,czero); for(int idim=0; idim<OHMMS_DIM; ++idim) std::fill_n(dUk.data(idim),kelmax,czero); ions_nearby.clear(); for(int iat=0; iat<Nion; ++iat) if(distjI[iat]<Ion_cutoff[iat]) ions_nearby.push_back(iat); for(int kg=0; kg<eGroups; ++kg) { int kel_counter = 0; for(int iind=0; iind<ions_nearby.size(); ++iind) { const int iat = ions_nearby[iind]; const int ig = Ions.GroupID[iat]; const valT r_jI = distjI[iat]; const posT disp_Ij = displjI[iat]; for(int kind=0; kind<elecs_inside(kg,iat).size(); kind++) { const int kel=elecs_inside(kg,iat)[kind]; if(kel<kelmax && kel!=jel) { DistkI_Compressed[kel_counter]=elecs_inside_dist(kg,iat)[kind]; DistjI_Compressed[kel_counter]=r_jI; Distjk_Compressed[kel_counter]=distjk[kel]; Disp_kI_Compressed(kel_counter)=elecs_inside_displ(kg,iat)[kind]; Disp_jI_Compressed(kel_counter)=disp_Ij; Disp_jk_Compressed(kel_counter)=displjk[kel]; DistIndice_k[kel_counter]=kel; kel_counter++; if(kel_counter==Nbuffer) { const FT& feeI(*F(ig,jg,kg)); computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk); kel_counter = 0; } } } if((iind+1==ions_nearby.size() || ig!=Ions.GroupID[ions_nearby[iind+1]]) && kel_counter>0) { const FT& feeI(*F(ig,jg,kg)); computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk); kel_counter = 0; } } } } inline void registerData(ParticleSet& P, 
WFBufferType& buf) { if ( Bytes_in_WFBuffer == 0 ) { Bytes_in_WFBuffer = buf.current(); buf.add(Uat.begin(), Uat.end()); buf.add(dUat.data(), dUat.end()); buf.add(d2Uat.begin(), d2Uat.end()); Bytes_in_WFBuffer = buf.current()-Bytes_in_WFBuffer; // free local space Uat.free(); dUat.free(); d2Uat.free(); } else { buf.forward(Bytes_in_WFBuffer); } } inline RealType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch=false) { evaluateGL(P, P.G, P.L, false); buf.forward(Bytes_in_WFBuffer); return LogValue; } inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf) { Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec); dUat.attachReference(Nelec, Nelec_padded, buf.lendReference<valT>(Nelec_padded*OHMMS_DIM)); d2Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec); build_compact_list(P); } void evaluateGL(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L, bool fromscratch=false) { if(fromscratch) recompute(P); LogValue=valT(0); for(int iat=0; iat<Nelec; ++iat) { LogValue += Uat[iat]; G[iat] += dUat[iat]; L[iat] += d2Uat[iat]; } constexpr valT mhalf(-0.5); LogValue=mhalf*LogValue; } void evaluateDerivatives(ParticleSet& P, const opt_variables_type& optvars, std::vector<RealType>& dlogpsi, std::vector<RealType>& dhpsioverpsi) { bool recalculate(false); std::vector<bool> rcsingles(myVars.size(),false); for (int k=0; k<myVars.size(); ++k) { int kk=myVars.where(k); if (kk<0) continue; if (optvars.recompute(kk)) recalculate=true; rcsingles[k]=true; } if (recalculate) { constexpr valT czero(0); constexpr valT cone(1); constexpr valT cminus(-1); constexpr valT ctwo(2); constexpr valT lapfac=OHMMS_DIM-cone; const DistanceTableData& ee_table=(*P.DistTables[0]); const DistanceTableData& eI_table=(*P.DistTables[myTableID]); build_compact_list(P); dLogPsi = czero; gradLogPsi = PosType(); lapLogPsi = czero; for(int iat=0; iat<Nion; ++iat) { const int ig=Ions.GroupID[iat]; for(int jg=0; jg<eGroups; ++jg) for(int 
jind=0; jind<elecs_inside(jg,iat).size(); jind++) { const int jel=elecs_inside(jg,iat)[jind]; const valT r_Ij = elecs_inside_dist(jg,iat)[jind]; const posT disp_Ij = cminus*elecs_inside_displ(jg,iat)[jind]; const valT r_Ij_inv = cone/r_Ij; for(int kg=0; kg<eGroups; ++kg) for(int kind=0; kind<elecs_inside(kg,iat).size(); kind++) { const int kel=elecs_inside(kg,iat)[kind]; if(kel<jel) { const FT& feeI(*F(ig,jg,kg)); const valT r_Ik = elecs_inside_dist(kg,iat)[kind]; const posT disp_Ik = cminus*elecs_inside_displ(kg,iat)[kind]; const valT r_Ik_inv = cone/r_Ik; const valT r_jk = ee_table.Distances[jel][kel]; const posT disp_jk = ee_table.Displacements[jel][kel]; const valT r_jk_inv = cone/r_jk; FT &func = *F(ig, jg, kg); int idx = J3UniqueIndex[F(ig, jg, kg)]; func.evaluateDerivatives(r_jk, r_Ij, r_Ik, du_dalpha[idx], dgrad_dalpha[idx], dhess_dalpha[idx]); int first = VarOffset(ig,jg,kg).first; int last = VarOffset(ig,jg,kg).second; std::vector<RealType> &dlog = du_dalpha[idx]; std::vector<PosType> &dgrad = dgrad_dalpha[idx]; std::vector<Tensor<RealType,3> > &dhess = dhess_dalpha[idx]; for (int p=first,ip=0; p<last; p++,ip++) { RealType& dval = dlog[ip]; PosType& dg = dgrad[ip]; Tensor<RealType,3>& dh = dhess[ip]; dg[0]*=r_jk_inv; dg[1]*=r_Ij_inv; dg[2]*=r_Ik_inv; PosType gr_ee = dg[0] * disp_jk; gradLogPsi(p,jel) -= dg[1] * disp_Ij - gr_ee; lapLogPsi(p,jel) -= (dh(0,0) + lapfac*dg[0] - ctwo*dh(0,1)*dot(disp_jk,disp_Ij)*r_jk_inv*r_Ij_inv + dh(1,1) + lapfac*dg[1]); gradLogPsi(p,kel) -= dg[2] * disp_Ik + gr_ee; lapLogPsi(p,kel) -= (dh(0,0) + lapfac*dg[0] + ctwo*dh(0,2)*dot(disp_jk,disp_Ik)*r_jk_inv*r_Ik_inv + dh(2,2) + lapfac*dg[2]); dLogPsi[p] -= dval; } } } } } for (int k=0; k<myVars.size(); ++k) { int kk=myVars.where(k); if (kk<0) continue; dlogpsi[kk]=dLogPsi[k]; RealType sum = 0.0; for (int i=0; i<Nelec; i++) { #if defined(QMC_COMPLEX) sum -= 0.5*lapLogPsi(k,i); for(int jdim=0; jdim<OHMMS_DIM; ++jdim) sum -= P.G[i][jdim].real()*gradLogPsi(k,i)[jdim]; #else sum -= 
0.5*lapLogPsi(k,i) + dot(P.G[i], gradLogPsi(k,i)); #endif } dhpsioverpsi[kk] = sum; } } } }; } #endif
dci.c
/* * Code for Fast k-Nearest Neighbour Search via Prioritized DCI * * This code implements the method described in the Prioritized DCI paper, which * can be found at https://arxiv.org/abs/1703.00440 * * Copyright (C) 2017 Ke Li * * * This file is part of the Dynamic Continuous Indexing reference implementation. * * The Dynamic Continuous Indexing reference implementation is free software: * you can redistribute it and/or modify it under the terms of the GNU Affero * General Public License as published by the Free Software Foundation, either * version 3 of the License, or (at your option) any later version. * * The Dynamic Continuous Indexing reference implementation is distributed in * the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with the Dynamic Continuous Indexing reference implementation. If * not, see <http://www.gnu.org/licenses/>. */ #include <malloc.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <assert.h> #include <float.h> #include <omp.h> #include <stdio.h> #include "dci.h" #include "util.h" static inline double abs_d(double x) { return x > 0 ? x : -x; } static inline int min_i(int a, int b) { return a < b ? a : b; } static inline int max_i(int a, int b) { return a > b ? 
a : b; }

/* Parent/child index pair used while assigning points on one level to a
 * parent on the next-coarser level. */
typedef struct tree_node {
    int parent;
    int child;
} tree_node;

/* Fill proj_vec with num_indices random projection directions of dimension
 * dim: each component is drawn from a standard normal (rand_normal), then
 * each direction (column j, stored at proj_vec[j*dim .. j*dim+dim-1]) is
 * normalized to unit Euclidean length. */
static void dci_gen_proj_vec(double* const proj_vec, const int dim, const int num_indices) {
    int i, j;
    double sq_norm, norm;
    for (i = 0; i < dim*num_indices; i++) {
        proj_vec[i] = rand_normal();
    }
    for (j = 0; j < num_indices; j++) {
        sq_norm = 0.0;
        for (i = 0; i < dim; i++) {
            sq_norm += (proj_vec[i+j*dim] * proj_vec[i+j*dim]);
        }
        norm = sqrt(sq_norm);
        for (i = 0; i < dim; i++) {
            proj_vec[i+j*dim] /= norm;
        }
    }
}

/* Initialize an empty DCI instance: record the configuration, zero all
 * counters/pointers, and draw the num_comp_indices*num_simp_indices random
 * projection vectors. No data points are stored here; see dci_add. */
void dci_init(dci* const dci_inst, const int dim, const int num_comp_indices, const int num_simp_indices) {
    int num_indices = num_comp_indices*num_simp_indices;
    /* NOTE(review): the time-based seed is commented out, so every run uses
     * the fixed seed 5 and generates identical projection vectors — confirm
     * this is intended outside of debugging/reproducibility testing. */
    //srand48(time(NULL));
    srand48(5);
    dci_inst->dim = dim;
    dci_inst->num_comp_indices = num_comp_indices;
    dci_inst->num_simp_indices = num_simp_indices;
    dci_inst->num_points = 0;
    dci_inst->num_levels = 0;
    dci_inst->num_coarse_points = 0;
    /* NOTE(review): memalign result is not checked for NULL before it is
     * written to by dci_gen_proj_vec below. */
    dci_inst->proj_vec = (double *)memalign(64, sizeof(double)*dim*num_indices);
    dci_inst->indices = NULL;
    dci_inst->data = NULL;
    dci_inst->next_level_ranges = NULL;
    dci_inst->num_finest_level_points = NULL;
    dci_gen_proj_vec(dci_inst->proj_vec, dim, num_indices);
}

/* qsort comparator: orders idx_elem by key ascending; returns -1/0/+1. */
static int dci_compare_idx_elem(const void *a, const void *b) {
    double key_diff = ((idx_elem *)a)->key - ((idx_elem *)b)->key;
    return (key_diff > 0) - (key_diff < 0);
}

/* qsort comparator: orders tree_node by parent id ascending.
 * NOTE(review): plain subtraction can overflow for extreme int values; it is
 * safe only because parent ids are small non-negative point indices. */
static int dci_compare_tree_node(const void *a, const void *b) {
    return ((tree_node *)a)->parent - ((tree_node *)b)->parent;
}

/* Defined later in this file. */
static void dci_assign_parent(dci* const dci_inst, const int num_populated_levels, const int num_queries, const int *selected_query_pos, const double* const query, const double* const query_proj, const dci_query_config query_config, tree_node* const assigned_parent);

// Note: the data itself is not kept in the index and must be kept in-place
// Added data must be contiguous
void dci_add(dci* const dci_inst, const int dim, const int num_points, const double* const data, const int num_levels, const dci_query_config construction_query_config) {
    int h, i, j;
int actual_num_levels, num_points_on_upper_levels, num_points_on_upper_and_cur_levels; // Only populated when actual_num_levels >= 2 int **level_members; int num_indices = dci_inst->num_comp_indices*dci_inst->num_simp_indices; double *data_proj = (double *)memalign(64, sizeof(double)*num_indices*num_points); // (# of indices) x (# of points) column-major when actual_num_levels >= 2, (# of points) x (# of indices) otherwise bool data_proj_transposed = false; // True if data_proj is (# of points) x (# of indices) column-major; used only for error-checking tree_node *assigned_parent; int *data_levels; double promotion_prob; int num_points_on_level[num_levels]; int level_relabelling[num_levels]; assert(dim == dci_inst->dim); assert(dci_inst->num_points == 0); dci_inst->data = data; dci_inst->num_points = num_points; if (num_levels < 2) { num_points_on_level[0] = num_points; actual_num_levels = num_levels; level_members = NULL; } else { data_levels = (int *)malloc(sizeof(int)*num_points); promotion_prob = pow((double)num_points, -1.0 / num_levels); for (i = 0; i < num_levels; i++) { num_points_on_level[i] = 0; } for (j = 0; j < num_points; j++) { for (i = 0; i < num_levels - 1; i++) { if (drand48() > promotion_prob) { break; } } num_points_on_level[i]++; data_levels[j] = i; } // Remove all levels with no points h = 0; for (i = 0; i < num_levels; i++) { if (num_points_on_level[i] > 0) { level_relabelling[i] = h; h++; } else { level_relabelling[i] = -1; } } actual_num_levels = h; for (i = 0; i < num_levels; i++) { if (level_relabelling[i] >= 0) { num_points_on_level[level_relabelling[i]] = num_points_on_level[i]; } } if (actual_num_levels >= 2) { level_members = (int **)malloc(sizeof(int*)*actual_num_levels); for (i = 0; i < actual_num_levels; i++) { level_members[i] = (int *)malloc(sizeof(int)*num_points_on_level[i]); h = 0; for (j = 0; j < num_points; j++) { if (level_relabelling[data_levels[j]] == i) { level_members[i][h] = j; h++; } } assert(h == 
num_points_on_level[i]); } } else { level_members = NULL; } free(data_levels); } dci_inst->num_coarse_points = num_points_on_level[actual_num_levels - 1]; dci_inst->num_levels = actual_num_levels; dci_inst->indices = (idx_elem **)malloc(sizeof(idx_elem*)*actual_num_levels); num_points_on_upper_and_cur_levels = 0; for (i = actual_num_levels - 1; i >= 0; i--) { num_points_on_upper_and_cur_levels += num_points_on_level[i]; dci_inst->indices[i] = (idx_elem *)malloc(sizeof(idx_elem)*num_points_on_upper_and_cur_levels*num_indices); } dci_inst->next_level_ranges = (range **)malloc(sizeof(range*)*actual_num_levels); num_points_on_upper_and_cur_levels = 0; for (i = actual_num_levels - 1; i >= 1; i--) { num_points_on_upper_and_cur_levels += num_points_on_level[i]; dci_inst->next_level_ranges[i] = (range *)malloc(sizeof(range)*num_points_on_upper_and_cur_levels); } dci_inst->next_level_ranges[0] = NULL; i = actual_num_levels - 1; num_points_on_upper_and_cur_levels = num_points_on_level[i]; if (actual_num_levels < 2) { assigned_parent = NULL; // data_proj is (# of points) x (# of indices) column-major matmul(num_points, num_indices, dci_inst->dim, data, dci_inst->proj_vec, data_proj); data_proj_transposed = true; for (j = 0; j < num_indices*num_points_on_upper_and_cur_levels; j++) { dci_inst->indices[i][j].key = data_proj[j]; dci_inst->indices[i][j].local_value = j % num_points_on_upper_and_cur_levels; dci_inst->indices[i][j].global_value = j % num_points_on_upper_and_cur_levels; } } else { assigned_parent = (tree_node *)malloc(sizeof(tree_node)*num_points); // data_proj is (# of indices) x (# of points) column-major matmul(num_indices, num_points, dci_inst->dim, dci_inst->proj_vec, data, data_proj); for (j = 0; j < num_points_on_upper_and_cur_levels; j++) { assigned_parent[j].child = level_members[i][j]; } for (j = 0; j < num_points_on_upper_and_cur_levels; j++) { int k; for (k = 0; k < num_indices; k++) { dci_inst->indices[i][j+k*num_points_on_upper_and_cur_levels].key = 
data_proj[k+level_members[i][j]*num_indices]; dci_inst->indices[i][j+k*num_points_on_upper_and_cur_levels].local_value = j; dci_inst->indices[i][j+k*num_points_on_upper_and_cur_levels].global_value = level_members[i][j]; } } } #pragma omp parallel for for (j = 0; j < num_indices; j++) { qsort(&(dci_inst->indices[i][j*num_points_on_level[i]]), num_points_on_level[i], sizeof(idx_elem), dci_compare_idx_elem); } num_points_on_upper_levels = num_points_on_upper_and_cur_levels; for (i = actual_num_levels - 2; i >= 0; i--) { assert(!data_proj_transposed); for (j = 0; j < num_points_on_upper_levels; j++) { assigned_parent[j].parent = j; } dci_assign_parent(dci_inst, actual_num_levels - i - 1, num_points_on_level[i], level_members[i], data, data_proj, construction_query_config, &(assigned_parent[num_points_on_upper_levels])); num_points_on_upper_and_cur_levels = num_points_on_upper_levels + num_points_on_level[i]; qsort(assigned_parent, num_points_on_upper_and_cur_levels, sizeof(tree_node), dci_compare_tree_node); h = 0; dci_inst->next_level_ranges[i+1][0].start = 0; for (j = 0; j < num_points_on_upper_and_cur_levels; j++) { if (assigned_parent[j].parent > h) { dci_inst->next_level_ranges[i+1][h].num = j - dci_inst->next_level_ranges[i+1][h].start; assert(dci_inst->next_level_ranges[i+1][h].num > 0); h++; assert(assigned_parent[j].parent == h); dci_inst->next_level_ranges[i+1][h].start = j; } } dci_inst->next_level_ranges[i+1][h].num = num_points_on_upper_and_cur_levels - dci_inst->next_level_ranges[i+1][h].start; assert(h == num_points_on_upper_levels - 1); for (j = 0; j < num_points_on_upper_and_cur_levels; j++) { range cur_indices_range = dci_inst->next_level_ranges[i+1][assigned_parent[j].parent]; int k; for (k = 0; k < num_indices; k++) { dci_inst->indices[i][(j-cur_indices_range.start)+k*cur_indices_range.num+cur_indices_range.start*num_indices].key = data_proj[k+assigned_parent[j].child*num_indices]; 
dci_inst->indices[i][(j-cur_indices_range.start)+k*cur_indices_range.num+cur_indices_range.start*num_indices].local_value = j - cur_indices_range.start; dci_inst->indices[i][(j-cur_indices_range.start)+k*cur_indices_range.num+cur_indices_range.start*num_indices].global_value = assigned_parent[j].child; } } #pragma omp parallel for for (j = 0; j < num_points_on_upper_levels*num_indices; j++) { range cur_indices_range = dci_inst->next_level_ranges[i+1][j / num_indices]; int k = j % num_indices; qsort(&(dci_inst->indices[i][k*cur_indices_range.num+cur_indices_range.start*num_indices]), cur_indices_range.num, sizeof(idx_elem), dci_compare_idx_elem); } num_points_on_upper_levels = num_points_on_upper_and_cur_levels; } assert(num_points_on_upper_levels == num_points); // Populate dci_inst->num_finest_level_points dci_inst->num_finest_level_points = (int **)malloc(sizeof(int*)*actual_num_levels); dci_inst->num_finest_level_points[0] = NULL; if (actual_num_levels >= 2) { num_points_on_upper_and_cur_levels = num_points - num_points_on_level[0]; dci_inst->num_finest_level_points[1] = (int *)malloc(sizeof(int)*num_points_on_upper_and_cur_levels); for (j = 0; j < num_points_on_upper_and_cur_levels; j++) { dci_inst->num_finest_level_points[1][j] = dci_inst->next_level_ranges[1][j].num; } for (i = 2; i < actual_num_levels; i++) { num_points_on_upper_and_cur_levels -= num_points_on_level[i-1]; dci_inst->num_finest_level_points[i] = (int *)malloc(sizeof(int)*num_points_on_upper_and_cur_levels); for (j = 0; j < num_points_on_upper_and_cur_levels; j++) { dci_inst->num_finest_level_points[i][j] = 0; int k; for (k = dci_inst->next_level_ranges[i][j].start; k < dci_inst->next_level_ranges[i][j].start+dci_inst->next_level_ranges[i][j].num; k++) { dci_inst->num_finest_level_points[i][j] += dci_inst->num_finest_level_points[i-1][k]; } } } } if (actual_num_levels >= 2) { for (i = 0; i < actual_num_levels; i++) { free(level_members[i]); } free(level_members); free(assigned_parent); } 
free(data_proj); } static inline int dci_next_closest_proj(const idx_elem* const index, int* const left_pos, int* const right_pos, const double query_proj, const int num_elems) { int cur_pos; if (*left_pos == -1 && *right_pos == num_elems) { cur_pos = -1; } else if (*left_pos == -1) { cur_pos = *right_pos; ++(*right_pos); } else if (*right_pos == num_elems) { cur_pos = *left_pos; --(*left_pos); } else if (index[*right_pos].key - query_proj < query_proj - index[*left_pos].key) { cur_pos = *right_pos; ++(*right_pos); } else { cur_pos = *left_pos; --(*left_pos); } return cur_pos; } // Returns the index of the element whose key is the largest that is less than the key // Returns an integer from -1 to num_elems - 1 inclusive // Could return -1 if all elements are greater or equal to key static inline int dci_search_index(const idx_elem* const index, const double key, const int num_elems) { int start_pos, end_pos, cur_pos; start_pos = -1; end_pos = num_elems - 1; cur_pos = (start_pos + end_pos + 2) / 2; while (start_pos < end_pos) { if (index[cur_pos].key < key) { start_pos = cur_pos; } else { end_pos = cur_pos - 1; } cur_pos = (start_pos + end_pos + 2) / 2; } return start_pos; } // Blind querying does not compute distances or look at the values of indexed vectors // Either num_to_visit or prop_to_visit can be -1; similarly, either num_to_retrieve or prop_to_retrieve can be -1 // Returns whenever we have visited max(num_to_visit, prop_to_visit*num_points) points or retrieved max(num_to_retrieve, prop_to_retrieve*num_points) points, whichever happens first static int dci_query_single_point_single_level(const dci* const dci_inst, const idx_elem* const indices, int num_points, int num_neighbours, const double* const query, const double* const query_proj, const dci_query_config query_config, const int* const num_finest_level_points, idx_elem* const top_candidates, double* const index_priority, int* const left_pos, int* const right_pos, int* const cur_point_local_ids, int* 
const cur_point_global_ids, int* const counts, double* const candidate_dists, double* const farthest_dists) { int i, j, k, m, h, top_h; int num_indices = dci_inst->num_comp_indices*dci_inst->num_simp_indices; int cur_pos; double cur_dist, cur_proj_dist, top_index_priority; int num_candidates = 0; double last_top_candidate_dist = -1.0; // The distance of the k^th closest candidate found so far int last_top_candidate = -1; int num_returned = 0; int num_returned_finest_level_points = 0; int num_dist_evals = 0; assert(num_neighbours > 0); int num_points_to_retrieve = max_i(query_config.num_to_retrieve, (int)ceil(query_config.prop_to_retrieve*num_points)); int num_projs_to_visit = max_i(query_config.num_to_visit*dci_inst->num_simp_indices, (int)ceil(query_config.prop_to_visit*num_points*dci_inst->num_simp_indices)); for (i = 0; i < dci_inst->num_comp_indices*num_points; i++) { counts[i] = 0; } if (!query_config.blind) { for (m = 0; m < dci_inst->num_comp_indices; m++) { farthest_dists[m] = 0.0; } } for (i = 0; i < num_points; i++) { candidate_dists[i] = -1.0; } for (i = 0; i < num_indices; i++) { left_pos[i] = dci_search_index(&(indices[i*num_points]), query_proj[i], num_points); right_pos[i] = left_pos[i] + 1; } for (i = 0; i < num_indices; i++) { cur_pos = dci_next_closest_proj(&(indices[i*num_points]), &(left_pos[i]), &(right_pos[i]), query_proj[i], num_points); assert(cur_pos >= 0); // There should be at least one point in the index index_priority[i] = abs_d(indices[cur_pos+i*num_points].key - query_proj[i]); cur_point_local_ids[i] = indices[cur_pos+i*num_points].local_value; assert(cur_point_local_ids[i] >= 0); cur_point_global_ids[i] = indices[cur_pos+i*num_points].global_value; assert(cur_point_global_ids[i] >= 0); } k = 0; while (k < num_points*dci_inst->num_simp_indices) { for (m = 0; m < dci_inst->num_comp_indices; m++) { top_index_priority = DBL_MAX; top_h = -1; for (h = 0; h < dci_inst->num_simp_indices; h++) { if 
(index_priority[h+m*dci_inst->num_simp_indices] < top_index_priority) { top_index_priority = index_priority[h+m*dci_inst->num_simp_indices]; top_h = h; } } if (top_h >= 0) { i = top_h+m*dci_inst->num_simp_indices; counts[cur_point_local_ids[i]+m*num_points]++; if (counts[cur_point_local_ids[i]+m*num_points] == dci_inst->num_simp_indices) { if (query_config.blind) { if (candidate_dists[cur_point_local_ids[i]] < 0.0) { top_candidates[num_candidates].local_value = cur_point_local_ids[i]; top_candidates[num_candidates].global_value = cur_point_global_ids[i]; candidate_dists[cur_point_local_ids[i]] = top_index_priority; num_candidates++; if (query_config.min_num_finest_level_points > 1) { num_returned_finest_level_points += num_finest_level_points[cur_point_local_ids[i]]; } } else if (top_index_priority > candidate_dists[cur_point_local_ids[i]]) { candidate_dists[cur_point_local_ids[i]] = top_index_priority; } } else { if (candidate_dists[cur_point_local_ids[i]] < 0.0) { // Compute distance cur_dist = compute_dist(&(dci_inst->data[((long long int)cur_point_global_ids[i])*dci_inst->dim]), query, dci_inst->dim); candidate_dists[cur_point_local_ids[i]] = cur_dist; num_dist_evals++; if (num_candidates < num_neighbours) { top_candidates[num_returned].key = cur_dist; top_candidates[num_returned].local_value = cur_point_local_ids[i]; top_candidates[num_returned].global_value = cur_point_global_ids[i]; if (cur_dist > last_top_candidate_dist) { last_top_candidate_dist = cur_dist; last_top_candidate = num_returned; } num_returned++; if (query_config.min_num_finest_level_points > 1) { num_returned_finest_level_points += num_finest_level_points[cur_point_local_ids[i]]; } } else if (cur_dist < last_top_candidate_dist) { if (query_config.min_num_finest_level_points > 1 && num_returned_finest_level_points + num_finest_level_points[cur_point_local_ids[i]] - num_finest_level_points[top_candidates[last_top_candidate].local_value] < query_config.min_num_finest_level_points) { // Add 
top_candidates[num_returned].key = cur_dist; top_candidates[num_returned].local_value = cur_point_local_ids[i]; top_candidates[num_returned].global_value = cur_point_global_ids[i]; if (cur_dist > last_top_candidate_dist) { last_top_candidate_dist = cur_dist; last_top_candidate = num_returned; } num_returned++; num_returned_finest_level_points += num_finest_level_points[cur_point_local_ids[i]]; } else { // Replace // If num_returned > num_neighbours, may need to delete, but will leave this to the end if (query_config.min_num_finest_level_points > 1) { num_returned_finest_level_points += num_finest_level_points[cur_point_local_ids[i]] - num_finest_level_points[top_candidates[last_top_candidate].local_value]; } top_candidates[last_top_candidate].key = cur_dist; top_candidates[last_top_candidate].local_value = cur_point_local_ids[i]; top_candidates[last_top_candidate].global_value = cur_point_global_ids[i]; last_top_candidate_dist = -1.0; for (j = 0; j < num_returned; j++) { if (top_candidates[j].key > last_top_candidate_dist) { last_top_candidate_dist = top_candidates[j].key; last_top_candidate = j; } } } } num_candidates++; } else { cur_dist = candidate_dists[cur_point_local_ids[i]]; } if (cur_dist > farthest_dists[m]) { farthest_dists[m] = cur_dist; } } } cur_pos = dci_next_closest_proj(&(indices[i*num_points]), &(left_pos[i]), &(right_pos[i]), query_proj[i], num_points); if (cur_pos >= 0) { cur_proj_dist = abs_d(indices[cur_pos+i*num_points].key - query_proj[i]); index_priority[i] = cur_proj_dist; cur_point_local_ids[i] = indices[cur_pos+i*num_points].local_value; cur_point_global_ids[i] = indices[cur_pos+i*num_points].global_value; } else { index_priority[i] = DBL_MAX; cur_point_local_ids[i] = -1; cur_point_global_ids[i] = -1; } } } if (num_candidates >= num_neighbours && num_returned_finest_level_points >= query_config.min_num_finest_level_points) { if (k + 1 >= num_projs_to_visit || num_candidates >= num_points_to_retrieve) { break; } } k++; } if 
(query_config.blind) { for (j = 0; j < num_candidates; j++) { top_candidates[j].key = candidate_dists[top_candidates[j].local_value]; } qsort(top_candidates, num_candidates, sizeof(idx_elem), dci_compare_idx_elem); num_returned = min_i(num_candidates, num_points_to_retrieve); } else { qsort(top_candidates, num_returned, sizeof(idx_elem), dci_compare_idx_elem); if (query_config.min_num_finest_level_points > 1) { num_returned_finest_level_points = 0; // Delete the points that are not needed to make num_returned_finest_level_points exceed query_config.min_num_finest_level_points for (j = 0; j < num_returned - 1; j++) { num_returned_finest_level_points += num_finest_level_points[top_candidates[j].local_value]; if (num_returned_finest_level_points >= query_config.min_num_finest_level_points) { break; } } num_returned = max_i(min_i(num_neighbours, num_points), j + 1); } } return num_returned; } static int dci_query_single_point(const dci* const dci_inst, int num_populated_levels, int num_neighbours, const double* const query, const double* const query_proj, dci_query_config query_config, idx_elem* const top_candidates) { int i, j, k, l; int num_indices = dci_inst->num_comp_indices*dci_inst->num_simp_indices; int num_points_to_expand; int max_num_points_to_expand = max_i(query_config.field_of_view, num_neighbours); if (query_config.blind) { max_num_points_to_expand += dci_inst->num_comp_indices-1; } idx_elem points_to_expand[max_num_points_to_expand*max_num_points_to_expand]; idx_elem points_to_expand_next[max_num_points_to_expand*max_num_points_to_expand]; int top_level_counts[dci_inst->num_comp_indices*dci_inst->num_coarse_points]; double top_level_candidate_dists[dci_inst->num_coarse_points]; // Only used when non-blind querying is used double top_level_farthest_dists[dci_inst->num_comp_indices]; int top_level_left_pos[num_indices]; int top_level_right_pos[num_indices]; double top_level_index_priority[num_indices]; // Relative priority of simple indices in each 
composite index int top_level_cur_point_local_ids[num_indices]; // Point at the current location in each index int top_level_cur_point_global_ids[num_indices]; // Point at the current location in each index int num_top_candidates[max_num_points_to_expand]; int total_num_top_candidates, num_finest_level_points_to_expand; assert(num_populated_levels <= dci_inst->num_levels); if (num_populated_levels <= 1) { if (query_config.blind) { query_config.num_to_retrieve = num_neighbours; query_config.prop_to_retrieve = -1.0; } query_config.min_num_finest_level_points = 0; num_points_to_expand = dci_query_single_point_single_level(dci_inst, dci_inst->indices[dci_inst->num_levels - 1], dci_inst->num_coarse_points, num_neighbours, query, query_proj, query_config, NULL, points_to_expand_next, top_level_index_priority, top_level_left_pos, top_level_right_pos, top_level_cur_point_local_ids, top_level_cur_point_global_ids, top_level_counts, top_level_candidate_dists, top_level_farthest_dists); } else { assert(query_config.field_of_view > 0); if (query_config.blind) { query_config.num_to_retrieve = query_config.field_of_view; query_config.prop_to_retrieve = -1.0; } query_config.min_num_finest_level_points = num_neighbours; if (num_neighbours > 1) { num_points_to_expand = dci_query_single_point_single_level(dci_inst, dci_inst->indices[dci_inst->num_levels - 1], dci_inst->num_coarse_points, query_config.field_of_view, query, query_proj, query_config, dci_inst->num_finest_level_points[dci_inst->num_levels - 1], points_to_expand, top_level_index_priority, top_level_left_pos, top_level_right_pos, top_level_cur_point_local_ids, top_level_cur_point_global_ids, top_level_counts, top_level_candidate_dists, top_level_farthest_dists); } else { num_points_to_expand = dci_query_single_point_single_level(dci_inst, dci_inst->indices[dci_inst->num_levels - 1], dci_inst->num_coarse_points, query_config.field_of_view, query, query_proj, query_config, NULL, points_to_expand, top_level_index_priority, 
top_level_left_pos, top_level_right_pos, top_level_cur_point_local_ids, top_level_cur_point_global_ids, top_level_counts, top_level_candidate_dists, top_level_farthest_dists); } for (i = dci_inst->num_levels - 2; i >= dci_inst->num_levels - num_populated_levels + 1; i--) { #pragma omp parallel for for (j = 0; j < num_points_to_expand; j++) { range mid_level_indices_range = dci_inst->next_level_ranges[i+1][points_to_expand[j].local_value]; int mid_level_counts[dci_inst->num_comp_indices*mid_level_indices_range.num]; double mid_level_candidate_dists[mid_level_indices_range.num]; // Only used when non-blind querying is used double mid_level_farthest_dists[dci_inst->num_comp_indices]; int num_indices_local = dci_inst->num_comp_indices*dci_inst->num_simp_indices; int mid_level_left_pos[num_indices_local]; int mid_level_right_pos[num_indices_local]; double mid_level_index_priority[num_indices_local]; // Relative priority of simple indices in each composite index int mid_level_cur_point_local_ids[num_indices_local]; // Point at the current location in each index int mid_level_cur_point_global_ids[num_indices_local]; // Point at the current location in each index int m; if (num_neighbours > 1) { num_top_candidates[j] = dci_query_single_point_single_level(dci_inst, &(dci_inst->indices[i][mid_level_indices_range.start*num_indices]), mid_level_indices_range.num, query_config.field_of_view, query, query_proj, query_config, &(dci_inst->num_finest_level_points[i][mid_level_indices_range.start]), &(points_to_expand_next[j*max_num_points_to_expand]), mid_level_index_priority, mid_level_left_pos, mid_level_right_pos, mid_level_cur_point_local_ids, mid_level_cur_point_global_ids, mid_level_counts, mid_level_candidate_dists, mid_level_farthest_dists); } else { num_top_candidates[j] = dci_query_single_point_single_level(dci_inst, &(dci_inst->indices[i][mid_level_indices_range.start*num_indices]), mid_level_indices_range.num, query_config.field_of_view, query, query_proj, query_config, 
NULL, &(points_to_expand_next[j*max_num_points_to_expand]), mid_level_index_priority, mid_level_left_pos, mid_level_right_pos, mid_level_cur_point_local_ids, mid_level_cur_point_global_ids, mid_level_counts, mid_level_candidate_dists, mid_level_farthest_dists); } for (m = 0; m < num_top_candidates[j]; m++) { points_to_expand_next[j*max_num_points_to_expand+m].local_value += mid_level_indices_range.start; } assert(num_top_candidates[j] <= max_num_points_to_expand); } // Remove empty slots in points_to_expand_next and make it contiguous for (k = 0; k < num_points_to_expand; k++) { if (num_top_candidates[k] < max_num_points_to_expand) { break; } } if (k < num_points_to_expand) { total_num_top_candidates = k*max_num_points_to_expand + num_top_candidates[k]; k++; for (; k < num_points_to_expand; k++) { for (l = 0; l < num_top_candidates[k]; l++) { points_to_expand_next[total_num_top_candidates] = points_to_expand_next[k*max_num_points_to_expand+l]; total_num_top_candidates++; } } } else { total_num_top_candidates = num_points_to_expand*max_num_points_to_expand; } qsort(points_to_expand_next, total_num_top_candidates, sizeof(idx_elem), dci_compare_idx_elem); if (num_neighbours > 1) { num_finest_level_points_to_expand = 0; // Delete the points that are not needed to make num_finest_level_points_to_expand exceed num_neighbours for (k = 0; k < total_num_top_candidates - 1; k++) { num_finest_level_points_to_expand += dci_inst->num_finest_level_points[i][points_to_expand_next[k].local_value]; if (num_finest_level_points_to_expand >= num_neighbours) { break; } } num_points_to_expand = max_i(min_i(query_config.field_of_view, total_num_top_candidates), k + 1); } else { num_points_to_expand = min_i(query_config.field_of_view, total_num_top_candidates); } for (k = 0; k < num_points_to_expand; k++) { points_to_expand[k] = points_to_expand_next[k]; } } if (query_config.blind) { query_config.num_to_retrieve = num_neighbours; query_config.prop_to_retrieve = -1.0; } 
query_config.min_num_finest_level_points = 0; #pragma omp parallel for for (j = 0; j < num_points_to_expand; j++) { range bottom_level_indices_range = dci_inst->next_level_ranges[dci_inst->num_levels - num_populated_levels + 1][points_to_expand[j].local_value]; int bottom_level_counts[dci_inst->num_comp_indices*bottom_level_indices_range.num]; double bottom_level_candidate_dists[bottom_level_indices_range.num]; // Only used when non-blind querying is used double bottom_level_farthest_dists[dci_inst->num_comp_indices]; int num_indices_local = dci_inst->num_comp_indices*dci_inst->num_simp_indices; int bottom_level_left_pos[num_indices_local]; int bottom_level_right_pos[num_indices_local]; double bottom_level_index_priority[num_indices_local]; // Relative priority of simple indices in each composite index int bottom_level_cur_point_local_ids[num_indices_local]; // Point at the current location in each index int bottom_level_cur_point_global_ids[num_indices_local]; // Point at the current location in each index int m; num_top_candidates[j] = dci_query_single_point_single_level(dci_inst, &(dci_inst->indices[dci_inst->num_levels - num_populated_levels][bottom_level_indices_range.start*num_indices]), bottom_level_indices_range.num, num_neighbours, query, query_proj, query_config, NULL, &(points_to_expand_next[j*num_neighbours]), bottom_level_index_priority, bottom_level_left_pos, bottom_level_right_pos, bottom_level_cur_point_local_ids, bottom_level_cur_point_global_ids, bottom_level_counts, bottom_level_candidate_dists, bottom_level_farthest_dists); for (m = 0; m < num_top_candidates[j]; m++) { points_to_expand_next[j*num_neighbours+m].local_value += bottom_level_indices_range.start; } assert(num_top_candidates[j] <= num_neighbours); } // Remove empty slots in points_to_expand_next and make it contiguous for (k = 0; k < num_points_to_expand; k++) { if (num_top_candidates[k] < num_neighbours) { break; } } if (k < num_points_to_expand) { total_num_top_candidates = 
k*num_neighbours + num_top_candidates[k]; k++; for (; k < num_points_to_expand; k++) { for (l = 0; l < num_top_candidates[k]; l++) { points_to_expand_next[total_num_top_candidates] = points_to_expand_next[k*num_neighbours+l]; total_num_top_candidates++; } } } else { total_num_top_candidates = num_points_to_expand*num_neighbours; } qsort(points_to_expand_next, total_num_top_candidates, sizeof(idx_elem), dci_compare_idx_elem); num_points_to_expand = min_i(num_neighbours, total_num_top_candidates); } for (k = 0; k < num_points_to_expand; k++) { top_candidates[k] = points_to_expand_next[k]; } return num_points_to_expand; } static void dci_assign_parent(dci* const dci_inst, const int num_populated_levels, const int num_queries, const int *selected_query_pos, const double* const query, const double* const query_proj, const dci_query_config query_config, tree_node* const assigned_parent) { int j; int num_indices = dci_inst->num_comp_indices*dci_inst->num_simp_indices; #pragma omp parallel for for (j = 0; j < num_queries; j++) { int cur_num_returned; idx_elem top_candidate; cur_num_returned = dci_query_single_point(dci_inst, num_populated_levels, 1, &(query[((long long int)selected_query_pos[j])*dci_inst->dim]), &(query_proj[selected_query_pos[j]*num_indices]), query_config, &top_candidate); assert(cur_num_returned == 1); assigned_parent[j].parent = top_candidate.local_value; assigned_parent[j].child = selected_query_pos[j]; } } // nearest_neighbour_dists can be NULL // num_returned can be NULL; if not NULL, it is populated with the number of returned points for each query - it should be of size num_queries // CAUTION: This function allocates memory for each nearest_neighbours[j], nearest_neighbour_dists[j], so we need to deallocate them outside of this function! 
void dci_query(dci* const dci_inst, const int dim, const int num_queries, const double* const query, const int num_neighbours, const dci_query_config query_config, int** const nearest_neighbours, double** const nearest_neighbour_dists, int* const num_returned) { int j; int num_indices = dci_inst->num_comp_indices*dci_inst->num_simp_indices; double* query_proj; assert(dim == dci_inst->dim); assert(num_neighbours > 0); query_proj = (double *)memalign(64, sizeof(double)*num_indices*num_queries); matmul(num_indices, num_queries, dim, dci_inst->proj_vec, query, query_proj); #pragma omp parallel for for (j = 0; j < num_queries; j++) { int k; int cur_num_returned; idx_elem top_candidates[num_neighbours]; // Maintains the top-k candidates cur_num_returned = dci_query_single_point(dci_inst, dci_inst->num_levels, num_neighbours, &(query[j*dim]), &(query_proj[j*num_indices]), query_config, top_candidates); assert(cur_num_returned <= num_neighbours); nearest_neighbours[j] = (int *)malloc(sizeof(int) * cur_num_returned); for (k = 0; k < cur_num_returned; k++) { nearest_neighbours[j][k] = top_candidates[k].global_value; } if (nearest_neighbour_dists) { nearest_neighbour_dists[j] = (double *)malloc(sizeof(double) * cur_num_returned); for (k = 0; k < cur_num_returned; k++) { nearest_neighbour_dists[j][k] = top_candidates[k].key; } } if (num_returned) { num_returned[j] = cur_num_returned; } } free(query_proj); } void dci_clear(dci* const dci_inst) { int i; if (dci_inst->indices) { for (i = 0; i < dci_inst->num_levels; i++) { free(dci_inst->indices[i]); } free(dci_inst->indices); dci_inst->indices = NULL; } if (dci_inst->next_level_ranges) { for (i = 1; i < dci_inst->num_levels; i++) { free(dci_inst->next_level_ranges[i]); } free(dci_inst->next_level_ranges); dci_inst->next_level_ranges = NULL; } if (dci_inst->num_finest_level_points) { for (i = 1; i < dci_inst->num_levels; i++) { free(dci_inst->num_finest_level_points[i]); } free(dci_inst->num_finest_level_points); 
dci_inst->num_finest_level_points = NULL; } dci_inst->data = NULL; dci_inst->num_points = 0; dci_inst->num_levels = 0; dci_inst->num_coarse_points = 0; } void dci_reset(dci* const dci_inst) { //srand48(time(NULL)); srand48(5); dci_clear(dci_inst); dci_gen_proj_vec(dci_inst->proj_vec, dci_inst->dim, dci_inst->num_comp_indices*dci_inst->num_simp_indices); } void dci_free(const dci* const dci_inst) { int i; if (dci_inst->indices) { for (i = 0; i < dci_inst->num_levels; i++) { free(dci_inst->indices[i]); } free(dci_inst->indices); } if (dci_inst->next_level_ranges) { for (i = 1; i < dci_inst->num_levels; i++) { free(dci_inst->next_level_ranges[i]); } free(dci_inst->next_level_ranges); } if (dci_inst->num_finest_level_points) { for (i = 1; i < dci_inst->num_levels; i++) { free(dci_inst->num_finest_level_points[i]); } free(dci_inst->num_finest_level_points); } free(dci_inst->proj_vec); }
paralle_for.c
#include <stdio.h> #include <omp.h> #include <unistd.h> int main() { printf(" omp_get_max_threads %d \n", omp_get_max_threads() ); printf(" omp_get_thread_num %d \n", omp_get_thread_num() ); char hn[600]; int ID = 0; #pragma omp parallel { ID = omp_get_thread_num(); gethostname(hn,600); printf("hello from hostname %s Thread Number: %d\n",hn, ID); } printf("Executing with 4 threads"); #pragma omp parallel //num_threads(4) { ID = omp_get_thread_num(); gethostname(hn,600); printf("\nhello from hostname %s Thread Number: %d\n",hn, ID); } printf("Executing with 8 threads"); //omp_set_num_threads(8); #pragma omp parallel { ID = omp_get_thread_num(); gethostname(hn,600); printf("\nhello from hostname %s Thread Number: %d\n",hn, ID); } return(0); }
GB_binop__bget_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__bget_uint32
// A.*B function (eWiseMult):       GB_AemultB__bget_uint32
// A*D function (colscale):         (none)
// D*A function (rowscale):         (none)
// C+=B function (dense accum):     GB_Cdense_accumB__bget_uint32
// C+=b function (dense accum):     GB_Cdense_accumb__bget_uint32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__bget_uint32
// C=scalar+B                       GB_bind1st__bget_uint32
// C=scalar+B'                      GB_bind1st_tran__bget_uint32
// C=A+scalar                       GB_bind2nd__bget_uint32
// C=A'+scalar                      GB_bind2nd_tran__bget_uint32

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_BITGET (aij, bij, uint32_t, 32)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_BITGET (x, y, uint32_t, 32) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_UINT32 || GxB_NO_BGET_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BGET is none of these, so this kernel is not generated for this operator.)

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__bget_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__bget_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__bget_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the code
    // generator; harmless, kept byte-identical because this file is generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bget_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bget_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct 
*GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bget_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t bij = Bx [p] ; Cx [p] = GB_BITGET (x, bij, uint32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bget_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; Cx [p] = GB_BITGET (aij, y, uint32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) 
#undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (x, aij, uint32_t, 32) ; \ } GrB_Info GB_bind1st_tran__bget_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (aij, y, uint32_t, 32) ; \ } GrB_Info GB_bind2nd_tran__bget_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
displacement_lagrangemultiplier_mixed_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierMixedContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierMixedContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierMixedContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierMixedContactCriteria ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierMixedContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : ConvergenceCriteria< TSparseSpace, TDenseSpace >(), mEnsureContact(EnsureContact), 
mPrintingOutput(PrintingOutput), mTableIsInitialized(false) { mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; mInitialResidualIsSet = false; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierMixedContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : ConvergenceCriteria< TSparseSpace, TDenseSpace >(), mTableIsInitialized(false) { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement solution mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The contact solution mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // Additional flags -> NOTE: Replace for a ral flag?¿ mEnsureContact = ThisParameters["ensure_contact"].GetBool(); mPrintingOutput = ThisParameters["print_convergence_criterion"].GetBool(); // We "initialize" the flag-> NOTE: Replace for a ral flag?¿ mInitialResidualIsSet = false; } //* Copy constructor. 
DisplacementLagrangeMultiplierMixedContactCriteria( DisplacementLagrangeMultiplierMixedContactCriteria const& rOther ) :BaseType(rOther) ,mInitialResidualIsSet(rOther.mInitialResidualIsSet) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) ,mPrintingOutput(rOther.mPrintingOutput) ,mTableIsInitialized(rOther.mTableIsInitialized) { } /// Destructor. ~DisplacementLagrangeMultiplierMixedContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something // Initialize TDataType disp_residual_solution_norm = 0.0, lm_solution_norm = 0.0, lm_increase_norm = 0.0; IndexType disp_dof_num(0),lm_dof_num(0); // Loop over Dofs #pragma omp parallel for reduction(+:disp_residual_solution_norm,lm_solution_norm,lm_increase_norm,disp_dof_num,lm_dof_num) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = rDofSet.begin() + i; std::size_t dof_id; TDataType residual_dof_value, dof_value, dof_incr; if (it_dof->IsFree()) { dof_id = it_dof->EquationId(); const auto curr_var = it_dof->GetVariable(); if ((curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (curr_var == 
VECTOR_LAGRANGE_MULTIPLIER_Y) || (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; lm_solution_norm += dof_value * dof_value; lm_increase_norm += dof_incr * dof_incr; lm_dof_num++; } else { residual_dof_value = rb[dof_id]; disp_residual_solution_norm += residual_dof_value * residual_dof_value; disp_dof_num++; } } } if(lm_increase_norm == 0.0) lm_increase_norm = 1.0; KRATOS_ERROR_IF(mEnsureContact && lm_solution_norm == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; mDispCurrentResidualNorm = disp_residual_solution_norm; const TDataType lm_ratio = std::sqrt(lm_increase_norm/lm_solution_norm); const TDataType lm_abs = std::sqrt(lm_increase_norm)/ static_cast<TDataType>(lm_dof_num); TDataType residual_disp_ratio; // We initialize the solution if (mInitialResidualIsSet == false) { mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 
1.0 : disp_residual_solution_norm; residual_disp_ratio = 1.0; mInitialResidualIsSet = true; } // We calculate the ratio of the displacements residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm; // We calculate the absolute norms TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num; // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance; } else { std::cout.precision(4); if (mPrintingOutput == false) { KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("MIXED CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "MIXED CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; 
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tLAGRANGE MUL: RATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > lm_ratio) ? residual_disp_ratio : lm_ratio; r_process_info[RESIDUAL_NORM] = (lm_abs > mLMAbsTolerance) ? lm_abs : mLMAbsTolerance; // We check if converged const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance); const bool lm_converged = (!mEnsureContact && lm_solution_norm == 0.0) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance); if ( disp_converged && lm_converged ) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& table = p_table->GetTable(); if (mPrintingOutput == false) table << BOLDFONT(FGRN(" Achieved")); else table << "Achieved"; } else { if (mPrintingOutput == false) KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& table = p_table->GetTable(); if (mPrintingOutput == false) table << BOLDFONT(FRED(" Not achieved")); else table << "Not achieved"; } else { if (mPrintingOutput == false) 
KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mTableIsInitialized == false) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& table = p_table->GetTable(); table.AddColumn("DP RATIO", 10); table.AddColumn("EXP. RAT", 10); table.AddColumn("ABS", 10); table.AddColumn("EXP. ABS", 10); table.AddColumn("LM RATIO", 10); table.AddColumn("EXP. RAT", 10); table.AddColumn("ABS", 10); table.AddColumn("EXP. ABS", 10); table.AddColumn("CONVERGENCE", 15); mTableIsInitialized = true; } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { mInitialResidualIsSet = false; } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ bool mInitialResidualIsSet; /// This "flag" is set in order to set that the initial residual is already computed bool mEnsureContact; /// This "flag" is used to check that the norm of the LM is always greater than 0 (no contact) bool mPrintingOutput; /// If the colors and bold are printed bool mTableIsInitialized; /// If the table is already initialized TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} 
///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; ///@} // Kratos classes ///@} // Application group } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H */
DKsetR.c
/* DKsetR.c
 *
 * Builds and consumes the "R" reduction of a combinatorial set C[0..iC-1]
 * under all e! digit permutations (PPsmall/PPbig), then accumulates a large
 * sum over representatives.  The final printf labels the result "d(PEE+2)",
 * so this presumably computes a Dedekind-number-style count — TODO confirm.
 *
 * NOTE(review): relies on macros/types declared elsewhere in the project
 * (PNUM, LNUM, I8, I16, U64, U128, DCX, TASKMX, PW2, and the word operators
 * EQ/NE/GE/GT/LE/AND/OR), plus globals C, iC, wC and helpers factorial,
 * nbiti, DfindCi, DRskanup, DRskandown, newl, print128, DKCe0, DKCx2,
 * DKbfillCi — none of which are visible here.
 */
#define XPEE 6          /* max e (permutation width) */
#define XPEf 720        /* max e! = 6! */
#define XPBX 64         /* max 2^e */
#define XRMX 17000      /* max number of representatives (RVAL) */

static PNUM PEE,PEf,PBX,RVAL;       /* e, e!, 2^e, and representative count */
static bool DUPLI[DCX];             /* true if entry is a non-canonical orbit member */
static PNUM RAIZ[DCX];              /* index of the orbit representative ("root") */
static PNUM SZ[DCX];                /* orbit size */
static PNUM QLE[DCX];               /* downward scan count per entry (see DRfillQQ) */
static PNUM QGE[DCX];               /* upward scan count per entry (see DRfillQQ) */
static I8 PPsmall[XPEf][XPEE];      /* each permutation as e digits */
static I8 PPbig[XPEf][XPBX];        /* each permutation lifted to bit positions of 0..2^e-1 */
static PNUM RRR[XRMX];              /* indices of the orbit representatives */
static U64 APO[XRMX];               /* per-representative accumulated product sum */

/* Build the permutation tables PPsmall (digit form) and PPbig (bit-index
 * form) for a given e.  Permutations are enumerated by scanning all e-digit
 * base-10 numbers and keeping those whose digits are a permutation of 0..e-1. */
void DRfillPP(int e)
{
I8 PPcifra[XPEE];
I8 BITPBX[XPBX];
I8 d;
I16 perm,dig;
bool ok;
PNUM s,nx,t;
printf("DRfillPP DKsetR.c \n");
PEE=e;PEf=factorial(e);PBX=PW2[e];
printf("QQ matrix. PEE %d PEf %d PBX %d \n" ,PEE,PEf,PBX);
assert(PEE LE XPEE);assert(PEf LE XPEf);
assert(PBX LE XPBX);
/* enumerate candidate digit strings 0..10^e-1 */
perm=0;nx=pow(10,e);
for(PNUM n=0;n<nx;n++)
{
t=n;ok=TRUE;
/* extract digits; reject any digit >= e */
for(I8 c=0;c<e;c++)
{dig=t%10;if(dig GE e){ok=FALSE;break;}
PPcifra[c]=dig;t/=10;
}
if(!ok)continue;
/* reject repeated digits (must be a true permutation) */
for(I8 c=0;c<e;c++)
for(I8 f=c+1;f<e;f++)
if(PPcifra[c] EQ PPcifra[f])
{ok=FALSE;break;}
if(!ok)continue;
for(I8 c=0;c<e;c++)
PPsmall[perm][c]=PPcifra[c];
perm++;if(perm GE PEf)break;
}
assert(perm EQ PEf);
printf("Permutaciones e!.\n");
/* lift each permutation to its action on the 2^e bit masks */
for(int p=0;p<PEf;p++)
{for(int n=0;n<PBX;n++)
{PPbig[p][n]=0;
for(int b=0;b<PEE;b++)
{d=PPsmall[p][b];BITPBX[d]=nbiti(n,b);
}
s=0;
for(int b=0;b<PEE;b++)
{if(BITPBX[b])s+=PW2[b];
}
PPbig[p][n]=s;
}
}
printf("Permutaciones calculadas.\n");
}

/* Partition C[0..iC-1] into permutation orbits: for each unvisited entry,
 * apply every permutation in PPbig, deduplicate the images, record the orbit
 * size in SZ[], mark non-representatives in DUPLI[]/RAIZ[], and collect the
 * representatives in RRR[].  Returns RVAL, the number of representatives. */
PNUM DRfillDRS(void)
{
LNUM GLBPPX[XPEf];
bool GLBNUL[XPEf];
I8 BITPBX[XPBX];
LNUM Ci,cmx;
I16 sze;
const int trz=1000000;     /* progress-print stride */
printf("DRfillDRS DKsetR.c \n");
assert(iC>0);
#pragma omp parallel for
for(PNUM i=0;i<DCX;i++)
{
DUPLI[i]=FALSE;SZ[i]=0;RAIZ[i]=0;
}
printf("Generando R... \n");
for(PNUM iii=0;iii<iC;iii++)
{
if(iii%trz EQ 0)printf(" %d",iii/trz);
if(DUPLI[iii])continue;
cmx=C[iC-1];Ci=C[iii];
/* image of Ci under every permutation */
for(I16 p=0;p<PEf;p++)
{
LNUM s;
GLBPPX[p]=0;
for(I16 b=0;b<PBX;b++)BITPBX[b]=0;
for(I16 b=0;b<PBX;b++)
{
I8 d;
d=PPbig[p][b];BITPBX[d]=nbiti(Ci,b);
}
s=0;
for(I16 b=0;b<PBX;b++)s+=BITPBX[b]*PW2[b];
GLBPPX[p]=s;
}
/* mark duplicate images (quadratic dedup over at most e! entries) */
for(I16 i=0;i<PEf;i++)GLBNUL[i]=FALSE;
for(I16 i=0;i<PEf;i++)
{
LNUM s;
s=GLBPPX[i];
for(I16 j=i+1;j<PEf;j++)
{if(!GLBNUL[j] AND (GLBPPX[j] EQ s))GLBNUL[j]=TRUE;
}
}
/* compact the distinct images to the front; sze = orbit size */
sze=0;
for(I16 i=0;i<PEf;i++)
{
if(GLBNUL[i])continue;
GLBPPX[sze++]=GLBPPX[i];
}
for(I16 i=0 ;i<sze;i++)GLBNUL[i]=0;
for(I16 i=sze;i<PEf;i++)GLBNUL[i]=1;
assert(sze GE 1);
assert((PEf%sze) EQ 0);   /* orbit size divides the group order */
SZ[iii]=sze;RAIZ[iii]=iii;
/* mark the other orbit members found later in C as duplicates;
 * images outside C's range, even values, or values <= Ci are skipped —
 * presumably those cannot occur later in C.  TODO confirm. */
for(I16 p=0;p<sze;p++)
{
PNUM j;LNUM s;
assert(!GLBNUL[p]);s=GLBPPX[p];
if( (s EQ Ci)OR(s GT cmx)OR((s%2) EQ 0) OR(s LE Ci) )continue;
j=DfindCi(0,(iC-1),s);
assert(j NE iii);
DUPLI[j]=TRUE;RAIZ[j]=iii;SZ[j]=sze;
}
}
{
/* verification pass: orbit sizes must sum to iC */
PNUM ctd,sorb,ri,pore;
U64 porb;
newl();printf("Verificando SZ. \n");
ctd=0;sorb=0;
for(PNUM i=0;i<iC;i++)
{
if(DUPLI[i])continue;
sorb+=SZ[i];RRR[ctd++]=i;
}
RVAL=ctd;
assert(sorb EQ iC);
printf("Checked SumOrb = iC. \n");
printf("RVAL. wC %d iC %d Rval= %d \n" ,wC,iC,RVAL);
/* also report the product of orbit sizes, factoring out powers of e! */
sorb=0;porb=1; pore=0;
for(PNUM i=0;i<RVAL;i++)
{
ri=RRR[i];
sorb+=SZ[ri];
porb*=SZ[ri];
while(porb>0 AND (porb%PEf) EQ 0)
{porb/=PEf;pore++;}
}
printf("sumaorb= %8d \n",sorb);
printf("porb %I64d \n",porb);
printf("pore %d PEf %d \n",pore,PEf);
}
return RVAL;
}

/* Consistency checks on the DUPLI/RAIZ/SZ/QLE/QGE structures:
 * sum(QLE) == sum(QGE), duplicates mirror their root's values, and orbit
 * sizes of the representatives sum back to iC. */
void Rcheckcomon(void)
{
BNUM sumQLE=0,sumQGE=0;
PNUM sumsz=0;
printf("Rcheckcomon \n");
for(PNUM i=0;i<iC;i++)
{
sumQLE+=QLE[i];sumQGE+=QGE[i];
}
assert(sumQLE EQ sumQGE);
printf("Checked QQ.\n");
for(PNUM i=0;i<iC;i++)
{
PNUM raiz=RAIZ[i];
assert(SZ[i] GT 0);assert(QLE[i] GT 0);
assert(QGE[i] GT 0);
if(DUPLI[i])
{
assert(!DUPLI[raiz]);
assert(SZ[i] EQ SZ[raiz]);
assert(QLE[i] EQ QLE[raiz]);
assert(QGE[i] EQ QGE[raiz]);
}
else
{assert(raiz EQ i);sumsz+=SZ[i];
}
}
assert(sumsz EQ iC);
printf("Checked iC structures.\n");
for(PNUM i=0;i<RVAL;i++)
{
PNUM ri=RRR[i];assert(!DUPLI[ri]);
assert(RAIZ[ri] EQ ri);
}
printf("Checked RVAL structures.\n");
}

/* Fill QLE/QGE: the up/down scan counts (DRskanup/DRskandown, defined
 * elsewhere) are computed only for the representatives, then copied to every
 * duplicate from its root.  QLE/QGE are invariant on an orbit by the checks
 * in Rcheckcomon. */
void DRfillQQ(void)
{
const int trz=1000;    /* progress-print stride */
printf("DRfillQQ DKsetR.c \n");
#pragma omp parallel for
for(PNUM i=0;i<DCX;i++)
{QLE[i]=0;QGE[i]=0;
}
printf("Fill QQ.\n");
printf("DR scan up/down \n");
/* NOTE(review): "ordered" is declared but no ordered region is used; the
 * progress printf is protected by a critical section instead. */
#pragma omp parallel for ordered
for(PNUM i=0;i<RVAL;i++)
{
PNUM ri=RRR[i];
QLE[ri]=DRskanup(ri);
QGE[ri]=DRskandown(ri);
if(i%trz EQ 0)
{
#pragma omp critical
printf(" %d",i/trz);
}
}
newl();printf("Copiando pares. \n");
#pragma omp parallel for ordered
for(PNUM i=0;i<iC;i++)
{
if(DUPLI[i])
{QLE[i]=QLE[RAIZ[i]];QGE[i]=QGE[RAIZ[i]];}
}
printf("QQ filled. \n");
}

/* For entry pp, sum over all j of QLE[index of C[pp] & C[j]] *
 * QGE[index of C[pp] | C[j]].  mjp/mkp cache the previous indices as a
 * "next slot" guess before falling back to binary search (DfindCi).
 * NOTE(review): C[mjp+1]/C[mkp+1] are read before the range check — if the
 * cached index is the last valid slot this reads one past it; presumably the
 * C array has a sentinel slot.  TODO confirm.
 * The assert(apo GT tmp) doubles as an overflow check on the U64 sum
 * (valid because QLE/QGE are asserted positive in Rcheckcomon). */
U64 DandorQQ(PNUM pp)
{
LNUM aa,bb,mitab,jonab;
PNUM ii,mj,mk,n,nm1,mjp,mkp,cj,rmit,sjon;
U64 apo,rs,tmp;
apo=0ll;cj=0;
mjp=0;mkp=0;ii=pp;aa=C[ii];n=iC;nm1=n-1;
for(PNUM j=0;j<iC;j++)
{
cj++;
bb=C[j];
mitab=(aa&bb);mj=mjp+1;
if(mitab NE C[mj])mj=DfindCi(0,ii,mitab);
rmit=QLE[mj];mjp=mj;
jonab=(aa|bb);mk=mkp+1;
if(jonab NE C[mk])mk=DfindCi(ii,nm1,jonab);
sjon=QGE[mk];mkp=mk;
rs=(U64)rmit;
rs*=(U64)sjon;
tmp=apo;apo+=rs;
assert(apo GT tmp);
}
assert(cj EQ iC);
return apo;
}

#define RV32MX 2147483647L
#define RV64MX 9223372036854775807ll
/* 128-bit value viewed either whole or as two 64-bit halves (for printing?) */
typedef struct { I64 W[2]; } RDI64;
typedef union { U128 L; RDI64 D; } RUS128;

/* Compute APO[i] = DandorQQ(representative i) for all representatives, log
 * them to APOS.LOG, and print sum(SZ[ri] * APO[i]) in 128-bit arithmetic —
 * labelled "d(PEE+2)", presumably the final Dedekind-style count.
 * NOTE(review): the parameter R is unused; RVAL (global) is used instead.
 * NOTE(review): the inner `for(int i=...)` inside the critical section
 * shadows the parallel loop index i — legal but easy to misread.
 * NOTE(review): `#pragma omp master`/`critical` below appear outside any
 * parallel region, so they are effectively serial; the preceding
 * assert(tnum EQ 1) documents that expectation. */
void DRx4magic(PNUM R)
{
PNUM TC[TASKMX];    /* per-thread completion counters, for progress output */
printf("DRx4magic DKsetR.c \n");
printf("Calculando APOs. \n");
for(int i=0;i<TASKMX;i++)TC[i]=0;
#pragma omp parallel for
for(PNUM i=0;i<XRMX;i++)APO[i]=0LL;
#pragma omp parallel for ordered
for(I32 i=0;i<RVAL;i++)
{
U64 v;I32 ri;
ri=RRR[i];v=DandorQQ(ri);
#pragma omp critical
{
int tnum;PNUM TCS;
APO[i]=v;
tnum=omp_get_thread_num();
TC[tnum]++;
if(tnum EQ 0)
{
TCS=0;
for(int i=0;i<TASKMX;i++)TCS+=TC[i];
printf(" %d",TCS);
}
}
}
{
/* dump SZ/APO pairs; only valid when run single-threaded */
FILE *f;PNUM ri;int tnum;
newl();
tnum=omp_get_num_threads();
assert(tnum EQ 1);
f=fopen("APOS.LOG","w");
for(PNUM i=0;i<RVAL;i++)
{
ri=RRR[i];
fprintf(f,"%6d %I64d \n",SZ[ri],APO[i]);
}
fclose(f);
printf("filed LOG \n");
}
#pragma omp master
{
U128 sum,tmp,sapo;
U64 ss;
PNUM TCS;
sum=0LL;sapo=0LL;
#pragma omp critical
for(PNUM i=0;i<RVAL;i++)
{
ss=(U64)SZ[RRR[i]];
tmp=0LL;tmp=ss;
ss=(U64)APO[i];
sapo+=(U128)ss;
tmp*=(U128)ss;assert(tmp GT 0LL);
sum+=(U128)tmp;assert(sum GT 0LL);   /* monotonic => no 128-bit overflow */
}
printf("d%d= \n",PEE+2);
print128(sum);
printf("Sum APO = \n");
print128(sapo);
TCS=0;for(int i=0;i<TASKMX;i++)TCS+=TC[i];
printf("TCS %d \n",TCS);
assert(TCS EQ RVAL);
}
}

/* Driver: build C (DKCe0 + e doublings via DKCx2), fill the permutation
 * tables and orbit structures, then run the QQ scans, the consistency
 * checks, and the final accumulation. */
void DRdemo(int ee)
{
PNUM P;int e;
e=ee;
printf("DRdemo DKsetR.c \n");
DKCe0();for(int i=0;i<e;i++)DKCx2();
iC1=iC-1;eC=e;
printf("C%d= %d \n",e,iC);
DRfillPP(e);
DKbfillCi();
P=DRfillDRS();
printf("R(%d)=[%d*%d] \n",e,wC,P);
DRfillQQ();
Rcheckcomon();
DRx4magic(P);
}
GB_binop__bget_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any fix here must be made in Generator/, not in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__bget_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bget_uint16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bget_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bget_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bget_uint16)
// C=scalar+B                       GB (_bind1st__bget_uint16)
// C=scalar+B'                      GB (_bind1st_tran__bget_uint16)
// C=A+scalar                       GB (_bind2nd__bget_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__bget_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITGET (x, y, uint16_t, 16) ;

// true if the binop must be flipped (BITGET is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bget_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns; a harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bget_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bget_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bget_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bget_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bget_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{  \
    uint16_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ;  \
}

GrB_Info GB (_bind1st_tran__bget_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{  \
    uint16_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ;  \
}

GrB_Info GB (_bind2nd_tran__bget_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_spat_to_SH.gen.c
/*
 * Copyright (c) 2010-2015 Centre National de la Recherche Scientifique.
 * written by Nathanael Schaeffer (CNRS, ISTerre, Grenoble, France).
 *
 * nathanael.schaeffer@ujf-grenoble.fr
 *
 * This software is governed by the CeCILL license under French law and
 * abiding by the rules of distribution of free software. You can use,
 * modify and/or redistribute the software under the terms of the CeCILL
 * license as circulated by CEA, CNRS and INRIA at the following URL
 * "http://www.cecill.info".
 *
 * The fact that you are presently reading this means that you have had
 * knowledge of the CeCILL license and that you accept its terms.
 *
 */

# This file is meta-code for SHT.c (spherical harmonic transform).
# it is intended for "make" to generate C code for 3 similar SHT functions,
# (namely spat_to_SH [Q tag]), spat_to_SHsphtor [V tag], spat_to_SH3 [both Q&V tags])
# from one generic function + tags.
# Basically, there are tags at the beginning of lines (Q,V) that are information
# to keep or remove the line depending on the function to build. (Q for scalar, V for vector, # for comment)
# NOTE(review): line breaks are semantically significant here -- each line's
# leading tag (Q, V, QX, VX, 3) selects which generated variant keeps it, and
# untagged lines are common to all variants.  Do not re-wrap or re-indent tags.
#
//////////////////////////////////////////////////

	static
QX	void GEN3(_an1,NWAY,SUFFIX)(shtns_cfg shtns, double *BrF, cplx *Qlm, const long int llim, const int imlim) {
VX	void GEN3(_an2,NWAY,SUFFIX)(shtns_cfg shtns, double *BtF, double *BpF, cplx *Slm, cplx *Tlm, const long int llim, const int imlim) {
3	void GEN3(_an3,NWAY,SUFFIX)(shtns_cfg shtns, double *BrF, double *BtF, double *BpF, cplx *Qlm, cplx *Slm, cplx *Tlm, const long int llim, const int imlim) {

	double *alm, *al;
	double *wg, *ct, *st;
V	double *l_2;
	long int nk, k, l,m;
	int k_inc, m_inc;
	unsigned m0, mstep;
  #ifndef SHT_AXISYM
	unsigned im;
V	double m_1;
  #endif
  #if _GCC_VEC_
Q	rnd qq[2*llim];
V	rnd ss[2*llim];
V	rnd tt[2*llim];
  #else
Q	double qq[llim];
V	double ss[llim];
V	double tt[llim];
  #endif
Q	double rer[NLAT_2 + NWAY*VSIZE2] SSE;
Q	double ror[NLAT_2 + NWAY*VSIZE2] SSE;
V	double ter[NLAT_2 + NWAY*VSIZE2] SSE;
V	double tor[NLAT_2 + NWAY*VSIZE2] SSE;
V	double per[NLAT_2 + NWAY*VSIZE2] SSE;
V	double por[NLAT_2 + NWAY*VSIZE2] SSE;
  #ifndef SHT_AXISYM
Q	double rei[NLAT_2 + NWAY*VSIZE2] SSE;
Q	double roi[NLAT_2 + NWAY*VSIZE2] SSE;
V	double tei[NLAT_2 + NWAY*VSIZE2] SSE;
V	double toi[NLAT_2 + NWAY*VSIZE2] SSE;
V	double pei[NLAT_2 + NWAY*VSIZE2] SSE;
V	double poi[NLAT_2 + NWAY*VSIZE2] SSE;
  #endif

	// ACCESS PATTERN
	k_inc = shtns->k_stride_a;
	m_inc = shtns->m_stride_a;

	nk = NLAT_2;	// copy NLAT_2 to a local variable for faster access (inner loop limit)
  #if _GCC_VEC_
	nk = ((unsigned) nk+(VSIZE2-1))/VSIZE2;
  #endif
	wg = shtns->wg;
	ct = shtns->ct;
	st = shtns->st;
V	l_2 = shtns->l_2;
	for (k=nk*VSIZE2; k<(nk-1+NWAY)*VSIZE2; ++k) {		// never written, so this is now done for all m's
Q		rer[k] = 0.0;
Q		ror[k] = 0.0;
V		ter[k] = 0.0;
V		tor[k] = 0.0;
V		per[k] = 0.0;
V		por[k] = 0.0;
  #ifndef SHT_AXISYM
Q		rei[k] = 0.0;
Q		roi[k] = 0.0;
V		tei[k] = 0.0;
V		toi[k] = 0.0;
V		pei[k] = 0.0;
V		poi[k] = 0.0;
  #endif
	}

  #ifndef _OPENMP
	m0 = 0;
	mstep = 1;
  #else
	m0 = omp_get_thread_num();
	mstep = omp_get_num_threads();
	if (m0 == 0)
  #endif
	{	// im=0 : dzl.p = 0.0 and evrything is REAL
		alm = shtns->blm;
V		k=0; do {	// compute symmetric and antisymmetric parts. (do not weight here, it is cheaper to weight y0)
V			double an = BtF[k*k_inc];	double bn = BtF[k*k_inc +1];
V			double bs = BtF[(NLAT-2-k)*k_inc];	double as = BtF[(NLAT-2-k)*k_inc +1];
V			ter[k] = an+as;		tor[k] = an-as;
V			ter[k+1] = bn+bs;	tor[k+1] = bn-bs;
V			k+=2;
V		} while(k < nk*VSIZE2);
V		k=0; do {	// compute symmetric and antisymmetric parts. (do not weight here, it is cheaper to weight y0)
V			double an = BpF[k*k_inc];	double bn = BpF[k*k_inc +1];
V			double bs = BpF[(NLAT-2-k)*k_inc];	double as = BpF[(NLAT-2-k)*k_inc +1];
V			per[k] = an+as;		por[k] = an-as;
V			per[k+1] = bn+bs;	por[k+1] = bn-bs;
V			k+=2;
V		} while(k < nk*VSIZE2);
Q		double r0a = 0.0;	double r0b = 0.0;
Q		k=0; do {	// compute symmetric and antisymmetric parts. (do not weight here, it is cheaper to weight y0)
Q			double an = BrF[k*k_inc];	double bn = BrF[k*k_inc +1];
Q			double bs = BrF[(NLAT-2-k)*k_inc];	double as = BrF[(NLAT-2-k)*k_inc +1];
Q			rer[k] = an+as;		ror[k] = an-as;
Q			rer[k+1] = bn+bs;	ror[k+1] = bn-bs;
Q			r0a += (an+as)*wg[k];	r0b += (bn+bs)*wg[k+1];
Q			k+=2;
Q		} while(k < nk*VSIZE2);
Q		Qlm[0] = (r0a+r0b) * alm[0];	// l=0 is done.
V		Slm[0] = 0.0;	Tlm[0] = 0.0;	// l=0 is zero for the vector transform.
		k = 0;
		for (l=0;l<llim;++l) {
Q			qq[l] = vall(0.0);
V			ss[l] = vall(0.0);	tt[l] = vall(0.0);
		}
		do {
			al = alm;
			rnd cost[NWAY], y0[NWAY], y1[NWAY];
V			rnd sint[NWAY], dy0[NWAY], dy1[NWAY];
Q			rnd rerk[NWAY], rork[NWAY];	// help the compiler to cache into registers.
V			rnd terk[NWAY], tork[NWAY], perk[NWAY], pork[NWAY];
			for (int j=0; j<NWAY; ++j) {
				cost[j] = vread(ct, k+j);
				y0[j] = vall(al[0]) * vread(wg, k+j);	// weight of Gauss quadrature appears here
V				dy0[j] = vall(0.0);
V				sint[j] = -vread(st, k+j);
				y1[j] = (vall(al[1])*y0[j]) * cost[j];
V				dy1[j] = (vall(al[1])*y0[j]) * sint[j];
Q				rerk[j] = vread(rer, k+j);	rork[j] = vread(ror, k+j);	// cache into registers.
V				terk[j] = vread(ter, k+j);	tork[j] = vread(tor, k+j);
V				perk[j] = vread(per, k+j);	pork[j] = vread(por, k+j);
			}
			al+=2;	l=1;
			while(l<llim) {
				for (int j=0; j<NWAY; ++j) {
V					dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*sint[j]) + vall(al[0])*dy0[j];
					y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
				}
				for (int j=0; j<NWAY; ++j) {
Q					qq[l-1] += y1[j] * rork[j];
V					ss[l-1] += dy1[j] * terk[j];
V					tt[l-1] -= dy1[j] * perk[j];
				}
				for (int j=0; j<NWAY; ++j) {
V					dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*sint[j]) + vall(al[2])*dy1[j];
					y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
				}
				for (int j=0; j<NWAY; ++j) {
Q					qq[l] += y0[j] * rerk[j];
V					ss[l] += dy0[j] * tork[j];
V					tt[l] -= dy0[j] * pork[j];
				}
				al+=4;	l+=2;
			}
			if (l==llim) {
				for (int j=0; j<NWAY; ++j) {
Q					qq[l-1] += y1[j] * rork[j];
V					ss[l-1] += dy1[j] * terk[j];
V					tt[l-1] -= dy1[j] * perk[j];
				}
			}
			k+=NWAY;
		} while (k < nk);
		for (l=1; l<=llim; ++l) {
  #if _GCC_VEC_
Q			((v2d*)Qlm)[l] = v2d_reduce(qq[l-1], vall(0));
V			((v2d*)Slm)[l] = v2d_reduce(ss[l-1], vall(0)) * vdup(l_2[l]);
V			((v2d*)Tlm)[l] = v2d_reduce(tt[l-1], vall(0)) * vdup(l_2[l]);
  #else
Q			Qlm[l] = qq[l-1];
V			Slm[l] = ss[l-1]*l_2[l];	Tlm[l] = tt[l-1]*l_2[l];
  #endif
		}
  #ifdef SHT_VAR_LTR
		for (l=llim+1; l<= LMAX; ++l) {
Q			((v2d*)Qlm)[l] = vdup(0.0);
V			((v2d*)Slm)[l] = vdup(0.0);	((v2d*)Tlm)[l] = vdup(0.0);
		}
    #ifndef SHT_AXISYM
		if (imlim <= MMAX) {	// zero out m >= imlim
			l = LiM(shtns, imlim*MRES, imlim);
			do {
Q				((v2d*)Qlm)[l] = vdup(0.0);
V				((v2d*)Slm)[l] = vdup(0.0);	((v2d*)Tlm)[l] = vdup(0.0);
			} while(++l < shtns->nlm);
		}
    #endif
  #endif
		m0=mstep;
	}

  #ifndef SHT_AXISYM
	for (im=m0; im<imlim; im+=mstep) {
		m = im*MRES;
		l = shtns->tm[im] / VSIZE2;
		//alm = shtns->blm[im];
		alm = shtns->blm + im*(2*LMAX -m+MRES);
Q		k = ((l*VSIZE2)>>1)*2;	// k must be even here.
Q		do {	// compute symmetric and antisymmetric parts, and reorganize data.
Q			double an, bn, ani, bni, bs, as, bsi, asi, t;
3			double sina = st[k];	double sinb = st[k+1];
Q			ani = BrF[im*m_inc + k*k_inc];	bni = BrF[im*m_inc + k*k_inc +1];	// north
Q			an = BrF[(NPHI-im)*m_inc + k*k_inc];	bn = BrF[(NPHI-im)*m_inc + k*k_inc +1];
Q			t = ani-an;	an += ani;	ani = bn-bni;	bn += bni;	bni = t;
3			an *= sina;	ani*= sina;	bn *= sinb;	bni *= sinb;
Q			bsi = BrF[im*m_inc + (NLAT-2 -k)*k_inc];	asi = BrF[im*m_inc + (NLAT-2-k)*k_inc + 1];	// south
Q			bs = BrF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc];	as = BrF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc +1];
Q			t = bsi-bs;	bs += bsi;	bsi = as-asi;	as += asi;	asi = t;
3			as *= sina;	asi*= sina;	bs *= sinb;	bsi *= sinb;
Q			rer[k] = an+as;		rei[k] = ani+asi;	rer[k+1] = bn+bs;	rei[k+1] = bni+bsi;
Q			ror[k] = an-as;		roi[k] = ani-asi;	ror[k+1] = bn-bs;	roi[k+1] = bni-bsi;
Q			k+=2;
Q		} while (k<nk*VSIZE2);
V		k = ((l*VSIZE2)>>1)*2;	// k must be even here.
V		do {	// compute symmetric and antisymmetric parts, and reorganize data.
V			double an, bn, ani, bni, bs, as, bsi, asi, t;
V			ani = BtF[im*m_inc + k*k_inc];	bni = BtF[im*m_inc + k*k_inc +1];	// north
V			an = BtF[(NPHI-im)*m_inc + k*k_inc];	bn = BtF[(NPHI-im)*m_inc + k*k_inc +1];
V			t = ani-an;	an += ani;	ani = bn-bni;	bn += bni;	bni = t;
V			bsi = BtF[im*m_inc + (NLAT-2 -k)*k_inc];	asi = BtF[im*m_inc + (NLAT-2-k)*k_inc + 1];	// south
V			bs = BtF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc];	as = BtF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc +1];
V			t = bsi-bs;	bs += bsi;	bsi = as-asi;	as += asi;	asi = t;
V			ter[k] = an+as;		tei[k] = ani+asi;	ter[k+1] = bn+bs;	tei[k+1] = bni+bsi;
V			tor[k] = an-as;		toi[k] = ani-asi;	tor[k+1] = bn-bs;	toi[k+1] = bni-bsi;
V			k+=2;
V		} while (k<nk*VSIZE2);
V		k = ((l*VSIZE2)>>1)*2;	// k must be even here.
V		do {	// compute symmetric and antisymmetric parts, and reorganize data.
V			double an, bn, ani, bni, bs, as, bsi, asi, t;
V			ani = BpF[im*m_inc + k*k_inc];	bni = BpF[im*m_inc + k*k_inc +1];	// north
V			an = BpF[(NPHI-im)*m_inc + k*k_inc];	bn = BpF[(NPHI-im)*m_inc + k*k_inc +1];
V			t = ani-an;	an += ani;	ani = bn-bni;	bn += bni;	bni = t;
V			bsi = BpF[im*m_inc + (NLAT-2 -k)*k_inc];	asi = BpF[im*m_inc + (NLAT-2-k)*k_inc + 1];	// south
V			bs = BpF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc];	as = BpF[(NPHI-im)*m_inc +(NLAT-2-k)*k_inc +1];
V			t = bsi-bs;	bs += bsi;	bsi = as-asi;	as += asi;	asi = t;
V			per[k] = an+as;		pei[k] = ani+asi;	per[k+1] = bn+bs;	pei[k+1] = bni+bsi;
V			por[k] = an-as;		poi[k] = ani-asi;	por[k+1] = bn-bs;	poi[k+1] = bni-bsi;
V			k+=2;
V		} while (k<nk*VSIZE2);
V		m_1 = 1.0/m;
		k=l;
  #if _GCC_VEC_
Q		rnd* q = qq;
V		rnd* s = ss;	rnd* t = tt;
  #else
		l = LiM(shtns, m, im);
Q		double* q = (double *) &Qlm[l];
V		double* s = (double *) &Slm[l];
V		double* t = (double *) &Tlm[l];
  #endif
		for (l=llim-m; l>=0; l--) {
Q			q[0] = vall(0.0);	q[1] = vall(0.0);	q+=2;
V			s[0] = vall(0.0);	s[1] = vall(0.0);	s+=2;
V			t[0] = vall(0.0);	t[1] = vall(0.0);	t+=2;
		}
		do {
  #if _GCC_VEC_
Q			rnd* q = qq;
V			rnd* s = ss;	rnd* t = tt;
  #else
			l = LiM(shtns, m, im);
Q			double* q = (double *) &Qlm[l];
V			double* s = (double *) &Slm[l];
V			double* t = (double *) &Tlm[l];
  #endif
			al = alm;
			rnd cost[NWAY], y0[NWAY], y1[NWAY];
V			rnd st2[NWAY], dy0[NWAY], dy1[NWAY];
Q			rnd rerk[NWAY], reik[NWAY], rork[NWAY], roik[NWAY];	// help the compiler to cache into registers.
V			rnd terk[NWAY], teik[NWAY], tork[NWAY], toik[NWAY];
V			rnd perk[NWAY], peik[NWAY], pork[NWAY], poik[NWAY];
			for (int j=0; j<NWAY; ++j) {
				cost[j] = vread(st, k+j);
				y0[j] = vall(0.5);
V				st2[j] = cost[j]*cost[j]*vall(-m_1);
V				y0[j] *= vall(m);	// for the vector transform, compute ylm*m/sint
			}
Q			l=m;
V			l=m-1;
			long int ny = 0;	// exponent to extend double precision range.
			if ((int)llim <= SHT_L_RESCALE_FLY) {
				do {	// sin(theta)^m
					if (l&1) for (int j=0; j<NWAY; ++j) y0[j] *= cost[j];
					for (int j=0; j<NWAY; ++j) cost[j] *= cost[j];
				} while(l >>= 1);
			} else {
				long int nsint = 0;
				do {	// sin(theta)^m (use rescaling to avoid underflow)
					if (l&1) {
						for (int j=0; j<NWAY; ++j) y0[j] *= cost[j];
						ny += nsint;
						if (vlo(y0[0]) < (SHT_ACCURACY+1.0/SHT_SCALE_FACTOR)) {
							ny--;
							for (int j=0; j<NWAY; ++j) y0[j] *= vall(SHT_SCALE_FACTOR);
						}
					}
					for (int j=0; j<NWAY; ++j) cost[j] *= cost[j];
					nsint += nsint;
					if (vlo(cost[0]) < 1.0/SHT_SCALE_FACTOR) {
						nsint--;
						for (int j=0; j<NWAY; ++j) cost[j] *= vall(SHT_SCALE_FACTOR);
					}
				} while(l >>= 1);
			}
			for (int j=0; j<NWAY; ++j) {
				y0[j] *= vall(al[0]);
				cost[j] = vread(ct, k+j);
V				dy0[j] = cost[j]*y0[j];
				y1[j] = (vall(al[1])*y0[j]) *cost[j];
V				dy1[j] = (vall(al[1])*y0[j]) *(cost[j]*cost[j] + st2[j]);
			}
			l=m;	al+=2;
			while ((ny<0) && (l<llim)) {	// ylm treated as zero and ignored if ny < 0
				for (int j=0; j<NWAY; ++j) {
V					dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*st2[j]) + vall(al[0])*dy0[j];
					y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
				}
				for (int j=0; j<NWAY; ++j) {
V					dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*st2[j]) + vall(al[2])*dy1[j];
					y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
				}
				l+=2;	al+=4;
				if (fabs(vlo(y0[NWAY-1])) > SHT_ACCURACY*SHT_SCALE_FACTOR + 1.0) {	// rescale when value is significant
					++ny;
					for (int j=0; j<NWAY; ++j) {
						y0[j] *= vall(1.0/SHT_SCALE_FACTOR);	y1[j] *= vall(1.0/SHT_SCALE_FACTOR);
V						dy0[j] *= vall(1.0/SHT_SCALE_FACTOR);	dy1[j] *= vall(1.0/SHT_SCALE_FACTOR);
					}
				}
			}
			if (ny == 0) {
Q				q+=2*(l-m);
V				s+=2*(l-m);	t+=2*(l-m);
				for (int j=0; j<NWAY; ++j) {	// prefetch
					y0[j] *= vread(wg, k+j);	y1[j] *= vread(wg, k+j);	// weight appears here (must be after the previous accuracy loop).
V					dy0[j] *= vread(wg, k+j);	dy1[j] *= vread(wg, k+j);
Q					rerk[j] = vread( rer, k+j);	reik[j] = vread( rei, k+j);	rork[j] = vread( ror, k+j);	roik[j] = vread( roi, k+j);
V					terk[j] = vread( ter, k+j);	teik[j] = vread( tei, k+j);	tork[j] = vread( tor, k+j);	toik[j] = vread( toi, k+j);
V					perk[j] = vread( per, k+j);	peik[j] = vread( pei, k+j);	pork[j] = vread( por, k+j);	poik[j] = vread( poi, k+j);
				}
				while (l<llim) {	// compute even and odd parts
Q					for (int j=0; j<NWAY; ++j)	q[0] += y0[j] * rerk[j];	// real even
Q					for (int j=0; j<NWAY; ++j)	q[1] += y0[j] * reik[j];	// imag even
V					for (int j=0; j<NWAY; ++j)	s[0] += dy0[j] * tork[j] + y0[j] * peik[j];
V					for (int j=0; j<NWAY; ++j)	s[1] += dy0[j] * toik[j] - y0[j] * perk[j];
V					for (int j=0; j<NWAY; ++j)	t[0] -= dy0[j] * pork[j] - y0[j] * teik[j];
V					for (int j=0; j<NWAY; ++j)	t[1] -= dy0[j] * poik[j] + y0[j] * terk[j];
					for (int j=0; j<NWAY; ++j) {
V						dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*st2[j]) + vall(al[0])*dy0[j];
						y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
					}
Q					for (int j=0; j<NWAY; ++j)	q[2] += y1[j] * rork[j];	// real odd
Q					for (int j=0; j<NWAY; ++j)	q[3] += y1[j] * roik[j];	// imag odd
V					for (int j=0; j<NWAY; ++j)	s[2] += dy1[j] * terk[j] + y1[j] * poik[j];
V					for (int j=0; j<NWAY; ++j)	s[3] += dy1[j] * teik[j] - y1[j] * pork[j];
V					for (int j=0; j<NWAY; ++j)	t[2] -= dy1[j] * perk[j] - y1[j] * toik[j];
V					for (int j=0; j<NWAY; ++j)	t[3] -= dy1[j] * peik[j] + y1[j] * tork[j];
Q					q+=4;
V					s+=4;	t+=4;
					for (int j=0; j<NWAY; ++j) {
V						dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*st2[j]) + vall(al[2])*dy1[j];
						y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
					}
					l+=2;	al+=4;
				}
				if (l==llim) {
Q					for (int j=0; j<NWAY; ++j)	q[0] += y0[j] * rerk[j];	// real even
Q					for (int j=0; j<NWAY; ++j)	q[1] += y0[j] * reik[j];	// imag even
V					for (int j=0; j<NWAY; ++j)	s[0] += dy0[j] * tork[j] + y0[j] * peik[j];
V					for (int j=0; j<NWAY; ++j)	s[1] += dy0[j] * toik[j] - y0[j] * perk[j];
V					for (int j=0; j<NWAY; ++j)	t[0] -= dy0[j] * pork[j] - y0[j] * teik[j];
V					for (int j=0; j<NWAY; ++j)	t[1] -= dy0[j] * poik[j] + y0[j] * terk[j];
				}
			}
			k+=NWAY;
		} while (k < nk);
		l = LiM(shtns, m, im);
Q		v2d *Ql = (v2d*) &Qlm[l];
V		v2d *Sl = (v2d*) &Slm[l];	v2d *Tl = (v2d*) &Tlm[l];
  #if _GCC_VEC_
		for (l=0; l<=llim-m; ++l) {
QX			Ql[l] = v2d_reduce(qq[2*l], qq[2*l+1]);
3			Ql[l] = v2d_reduce(qq[2*l], qq[2*l+1]) * vdup(m_1);
V			Sl[l] = v2d_reduce(ss[2*l], ss[2*l+1]) * vdup(l_2[l+m]);
V			Tl[l] = v2d_reduce(tt[2*l], tt[2*l+1]) * vdup(l_2[l+m]);
		}
  #else
V		for (l=0; l<=llim-m; ++l) {
3			Ql[l] *= m_1;
V			Sl[l] *= l_2[l+m];
V			Tl[l] *= l_2[l+m];
V		}
  #endif
  #ifdef SHT_VAR_LTR
		for (l=llim+1-m; l<=LMAX-m; ++l) {
Q			Ql[l] = vdup(0.0);
V			Sl[l] = vdup(0.0);	Tl[l] = vdup(0.0);
		}
  #endif
	}
  #endif
}

QX static void GEN3(spat_to_SH_omp,NWAY,SUFFIX)(shtns_cfg shtns, double *Vr, cplx *Qlm, long int llim) {
VX static void GEN3(spat_to_SHsphtor_omp,NWAY,SUFFIX)(shtns_cfg shtns, double *Vt, double *Vp, cplx *Slm, cplx *Tlm, long int llim) {
3 static void GEN3(spat_to_SHqst_omp,NWAY,SUFFIX)(shtns_cfg shtns, double *Vr, double *Vt, double *Vp, cplx *Qlm, cplx *Slm, cplx *Tlm, long int llim) {

Q	double *BrF;	// contains the Fourier transformed data
V	double *BtF, *BpF;	// contains the Fourier transformed data
	unsigned imlim=0;
Q	BrF = Vr;
V	BtF = Vt;	BpF = Vp;
  #ifndef SHT_AXISYM
	imlim = MTR;
	#ifdef SHT_VAR_LTR
	if (imlim*MRES > (unsigned) llim) imlim = ((unsigned) llim)/MRES;	// 32bit mul and div should be faster
	#endif
	if (shtns->fftc_mode >= 0) {
		if (shtns->fftc_mode == 0) {	// in-place
V	#ifdef HAVE_LIBFFTW3_OMP
Q			fftw_execute_dft(shtns->fftc,(cplx*)BrF, (cplx*)BrF);
V			fftw_execute_dft(shtns->fftc,(cplx*)BtF, (cplx*)BtF);
V			fftw_execute_dft(shtns->fftc,(cplx*)BpF, (cplx*)BpF);
V	#endif
		} else {	// alloc memory for the transpose FFT
			unsigned long nv = shtns->nspat;
QX			BrF = (double*) VMALLOC( nv * sizeof(double) );
VX			BtF = (double*) VMALLOC( 2*nv * sizeof(double) );
VX			BpF = BtF + nv;
3			BrF = (double*) VMALLOC( 3*nv * sizeof(double) );
3			BtF = BrF + nv;	BpF = BtF + nv;
V	#ifdef HAVE_LIBFFTW3_OMP
Q			fftw_execute_split_dft(shtns->fftc, Vr+NPHI, Vr, BrF+1, BrF);
V			fftw_execute_split_dft(shtns->fftc, Vt+NPHI, Vt, BtF+1, BtF);
V			fftw_execute_split_dft(shtns->fftc, Vp+NPHI, Vp, BpF+1, BpF);
V	#endif
		}
	}
  #endif
	imlim += 1;

	#pragma omp parallel num_threads(shtns->nthreads)
	{
  #ifndef SHT_AXISYM
V	#ifndef HAVE_LIBFFTW3_OMP
V		if (shtns->fftc_mode == 0) {	// in-place
3			#pragma omp single nowait
3			fftw_execute_dft(shtns->fftc,(cplx*)BrF, (cplx*)BrF);
V			#pragma omp single nowait
V			fftw_execute_dft(shtns->fftc,(cplx*)BtF, (cplx*)BtF);
V			#pragma omp single nowait
V			fftw_execute_dft(shtns->fftc,(cplx*)BpF, (cplx*)BpF);
V		} else if (shtns->fftc_mode > 0) {	// split out-of-place
3			#pragma omp single nowait
3			fftw_execute_split_dft(shtns->fftc, Vr+NPHI, Vr, ((double*)BrF)+1, ((double*)BrF));
V			#pragma omp single nowait
V			fftw_execute_split_dft(shtns->fftc, Vt+NPHI, Vt, ((double*)BtF)+1, ((double*)BtF));
V			#pragma omp single nowait
V			fftw_execute_split_dft(shtns->fftc, Vp+NPHI, Vp, ((double*)BpF)+1, ((double*)BpF));
V		}
V		#pragma omp barrier
V	#endif
  #endif
QX		GEN3(_an1,NWAY,SUFFIX)(shtns, BrF, Qlm, llim, imlim);
VX		GEN3(_an2,NWAY,SUFFIX)(shtns, BtF, BpF, Slm, Tlm, llim, imlim);
3		GEN3(_an3,NWAY,SUFFIX)(shtns, BrF, BtF, BpF, Qlm, Slm, Tlm, llim, imlim);
	}

  #ifndef SHT_AXISYM
	if (shtns->fftc_mode > 0) {	// free memory
Q		VFREE(BrF);
VX		VFREE(BtF);	// this frees also BpF.
	}
  #endif
}
Flinng.h
#ifndef _FLING #define _FLING #include "LshFunctions.h" #include <cstdint> #include <iostream> #include <pybind11/numpy.h> #include <pybind11/pybind11.h> #include <stdexcept> #include <vector> // TODO: Add back 16 bit FLINNG, check input // TODO: Reproduce experiments // TODO: Add percent of srp used class Flinng { public: Flinng(uint64_t num_rows, uint64_t cells_per_row, uint64_t num_hashes, uint64_t hash_range) : num_rows(num_rows), cells_per_row(cells_per_row), num_hash_tables(num_hashes), hash_range(hash_range), inverted_flinng_index(hash_range * num_hashes), cell_membership(num_rows * cells_per_row) {} // All the hashes for point 1 come first, etc. // Size of hashes should be multiple of num_hash_tables void addPoints(std::vector<uint64_t> hashes) { uint64_t num_points = hashes.size() / num_hash_tables; std::vector<uint64_t> random_buckets(num_rows * num_points); for (uint64_t i = 0; i < num_rows * num_points; i++) { random_buckets[i] = (rand() % cells_per_row + cells_per_row) % cells_per_row + (i % num_rows) * cells_per_row; } #pragma omp parallel for for (uint64_t table = 0; table < num_hash_tables; table++) { for (uint64_t point = 0; point < num_points; point++) { uint64_t hash = hashes[point * num_hash_tables + table]; uint64_t hash_id = table * hash_range + hash; for (uint64_t row = 0; row < num_rows; row++) { inverted_flinng_index[hash_id].push_back( random_buckets[point * num_rows + row]); } } } for (uint64_t point = 0; point < num_points; point++) { for (uint64_t row = 0; row < num_rows; row++) { cell_membership[random_buckets[point * num_rows + row]].push_back( total_points_added + point); } } total_points_added += num_points; prepareForQueries(); } void prepareForQueries() { for (uint64_t i = 0; i < inverted_flinng_index.size(); i++) { std::sort(inverted_flinng_index[i].begin(), inverted_flinng_index[i].end()); inverted_flinng_index[i].erase( std::unique(inverted_flinng_index[i].begin(), inverted_flinng_index[i].end()), 
inverted_flinng_index[i].end()); } } // Again all the hashes for point 1 come first, etc. // Size of hashes should be multiple of num_hash_tables // Results are similarly ordered std::vector<uint64_t> query(std::vector<uint64_t> hashes, uint32_t top_k) { uint64_t num_queries = hashes.size() / num_hash_tables; std::vector<uint64_t> results(top_k * num_queries); #pragma omp parallel for for (uint32_t query_id = 0; query_id < num_queries; query_id++) { std::vector<uint32_t> counts(num_rows * cells_per_row, 0); for (uint32_t rep = 0; rep < num_hash_tables; rep++) { const uint32_t index = hash_range * rep + hashes[num_hash_tables * query_id + rep]; const uint32_t size = inverted_flinng_index[index].size(); for (uint32_t small_index = 0; small_index < size; small_index++) { // This single line takes 80% of the time, around half for the move // and half for the add ++counts[inverted_flinng_index[index][small_index]]; } } std::vector<uint32_t> sorted[num_hash_tables + 1]; uint32_t size_guess = num_rows * cells_per_row / (num_hash_tables + 1); for (std::vector<uint32_t> &v : sorted) { v.reserve(size_guess); } for (uint32_t i = 0; i < num_rows * cells_per_row; ++i) { sorted[counts[i]].push_back(i); } if (num_rows > 2) { std::vector<uint8_t> num_counts(total_points_added, 0); uint32_t num_found = 0; for (int32_t rep = num_hash_tables; rep >= 0; --rep) { for (uint32_t bin : sorted[rep]) { for (uint32_t point : cell_membership[bin]) { if (++num_counts[point] == num_rows) { results[top_k * query_id + num_found] = point; if (++num_found == top_k) { goto end_of_query; } } } } } } else { char *num_counts = (char *)calloc(total_points_added / 8 + 1, sizeof(char)); uint32_t num_found = 0; for (int32_t rep = num_hash_tables; rep >= 0; --rep) { for (uint32_t bin : sorted[rep]) { for (uint32_t point : cell_membership[bin]) { if (num_counts[(point / 8)] & (1 << (point % 8))) { results[top_k * query_id + num_found] = point; if (++num_found == top_k) { free(num_counts); goto end_of_query; 
} } else { num_counts[(point / 8)] |= (1 << (point % 8)); } } } } } end_of_query:; } return results; } private: const uint64_t num_rows, cells_per_row, num_hash_tables, hash_range; uint64_t total_points_added = 0; std::vector<std::vector<uint32_t>> inverted_flinng_index; std::vector<std::vector<uint64_t>> cell_membership; }; #endif
matmul_float.c
/*
 * Square matrix multiplication
 * A[N][N] * B[N][N] = C[N][N]
 *
 * Benchmarks an OpenMP-SIMD kernel against a serial reference.  Both
 * kernels index their second operand as B[j][k], i.e. they expect it
 * pre-transposed; main() passes BT so the overall product is A*B with
 * unit-stride inner loops.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>      /* fabsf, used by check() */
#include <sys/timeb.h> /* ftime; <malloc.h> dropped — non-portable and unused */

#define N 1024
//#define N 16

/* Read wall-clock time in seconds (millisecond resolution).
   NOTE(review): ftime() is obsolescent in POSIX; kept so timing behavior
   matches the original — consider clock_gettime(CLOCK_MONOTONIC). */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* malloc that aborts on failure: the benchmark cannot run partially
   allocated, and the original left every allocation unchecked. */
static void *xmalloc(size_t size) {
    void *p = malloc(size);
    if (p == NULL) {
        fprintf(stderr, "matmul: out of memory (%zu bytes)\n", size);
        exit(EXIT_FAILURE);
    }
    return p;
}

/* Fill an N x N matrix with uniform random floats in [0, 10). */
void init(float **A) {
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            A[i][j] = (float)rand()/(float)(RAND_MAX/10.0);
        }
    }
}

/* C = A * B^T with an OpenMP SIMD reduction over the dot product.
   Callers pass a pre-transposed B, so this computes A*B overall. */
void matmul_simd(float **A, float **B, float **C) {
    int i, j, k;
    float temp;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            temp = 0;
#pragma omp simd reduction(+:temp)
            for (k = 0; k < N; k++) {
                temp += A[i][k] * B[j][k];
            }
            C[i][j] = temp;
        }
    }
}

/* Debug aid: print only the top-left 8x8 corner of a matrix. */
void print_matrix(float **matrix) {
    for (int i = 0; i < 8; i++) {
        printf("[");
        for (int j = 0; j < 8; j++) {
            printf("%.2f ", matrix[i][j]);
        }
        puts("]");
    }
    puts("");
}

/* Serial reference kernel, same transposed-operand convention as
   matmul_simd. */
void matmul_serial(float **A, float **B, float **C) {
    int i, j, k;
    float temp;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            temp = 0;
            for (k = 0; k < N; k++) {
                temp += A[i][k] * B[j][k];
            }
            C[i][j] = temp;
        }
    }
}

/* Sum of absolute element-wise differences between A and B.
   fabsf fixes a real defect: the original summed signed differences, so
   positive and negative errors could cancel and report ~0 for a wrong
   result. */
float check(float **A, float **B) {
    float difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            difference += fabsf(A[i][j] - B[i][j]);
        }
    }
    return difference;
}

// Main
int main(int argc, char *argv[]) {
    /* Allocate the five N x N matrices (row-pointer layout). */
    float **A = xmalloc(sizeof(float*) * N);
    float **B = xmalloc(sizeof(float*) * N);
    float **C_simd = xmalloc(sizeof(float*) * N);
    float **C_serial = xmalloc(sizeof(float*) * N);
    float **BT = xmalloc(sizeof(float*) * N);
    for (int i = 0; i < N; i++) {
        A[i] = xmalloc(sizeof(float) * N);
        B[i] = xmalloc(sizeof(float) * N);
        C_simd[i] = xmalloc(sizeof(float) * N);
        C_serial[i] = xmalloc(sizeof(float) * N);
        BT[i] = xmalloc(sizeof(float) * N);
    }

    srand(time(NULL));
    init(A);
    init(B);

    /* BT = transpose(B); both kernels consume the transposed operand. */
    for (int line = 0; line < N; line++) {
        for (int col = 0; col < N; col++) {
            BT[line][col] = B[col][line];
        }
    }

    int i;
    int num_runs = 20;

    /* Warm-up runs (caches, OpenMP runtime) excluded from timing. */
    matmul_simd(A, BT, C_simd);
    matmul_serial(A, BT, C_serial);

    double elapsed = 0;
    double elapsed1 = read_timer();
    for (i = 0; i < num_runs; i++)
        matmul_simd(A, BT, C_simd);
    elapsed += (read_timer() - elapsed1);

    double elapsed_serial = 0;
    double elapsed_serial1 = read_timer();
    for (i = 0; i < num_runs; i++)
        matmul_serial(A, BT, C_serial);
    elapsed_serial += (read_timer() - elapsed_serial1);

    print_matrix(A);
    print_matrix(BT);
    puts("=\n");
    print_matrix(C_simd);
    puts("---------------------------------");
    print_matrix(C_serial);

    /* 2*N^3 flops per multiply, num_runs multiplies per timed section. */
    double gflops_omp = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed));
    double gflops_serial = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed_serial));
    printf("======================================================================================================\n");
    printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_omp:\t\t%4f\t%4f\n", elapsed/num_runs, gflops_omp);
    printf("matmul_serial:\t\t%4f\t%4f\n", elapsed_serial/num_runs, gflops_serial);
    printf("Correctness check: %f\n", check(C_simd, C_serial));

    /* Release everything (the original leaked all allocations). */
    for (int r = 0; r < N; r++) {
        free(A[r]);
        free(B[r]);
        free(C_simd[r]);
        free(C_serial[r]);
        free(BT[r]);
    }
    free(A);
    free(B);
    free(C_simd);
    free(C_serial);
    free(BT);
    return 0;
}
omp-low.c
/* Lowering pass for OpenMP directives. Converts OpenMP directives into explicit calls to the runtime library (libgomp) and data marshalling to implement data sharing and copying clauses. Contributed by Diego Novillo <dnovillo@redhat.com> Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "gimple.h" #include "tree-iterator.h" #include "tree-inline.h" #include "langhooks.h" #include "diagnostic-core.h" #include "tree-flow.h" #include "timevar.h" #include "flags.h" #include "function.h" #include "expr.h" #include "tree-pass.h" #include "ggc.h" #include "except.h" #include "splay-tree.h" #include "optabs.h" #include "cfgloop.h" /* Lowering of OpenMP parallel and workshare constructs proceeds in two phases. The first phase scans the function looking for OMP statements and then for variables that must be replaced to satisfy data sharing clauses. The second phase expands code for the constructs, as well as re-gimplifying things when variables have been replaced with complex expressions. Final code generation is done by pass_expand_omp. The flowgraph is scanned for parallel regions which are then moved to a new function, to be invoked by the thread library. */ /* Context structure. 
Used to store information about each parallel directive in the code. */ typedef struct omp_context { /* This field must be at the beginning, as we do "inheritance": Some callback functions for tree-inline.c (e.g., omp_copy_decl) receive a copy_body_data pointer that is up-casted to an omp_context pointer. */ copy_body_data cb; /* The tree of contexts corresponding to the encountered constructs. */ struct omp_context *outer; gimple stmt; /* Map variables to fields in a structure that allows communication between sending and receiving threads. */ splay_tree field_map; tree record_type; tree sender_decl; tree receiver_decl; /* These are used just by task contexts, if task firstprivate fn is needed. srecord_type is used to communicate from the thread that encountered the task construct to task firstprivate fn, record_type is allocated by GOMP_task, initialized by task firstprivate fn and passed to the task body fn. */ splay_tree sfield_map; tree srecord_type; /* A chain of variables to add to the top-level block surrounding the construct. In the case of a parallel, this is in the child function. */ tree block_vars; /* What to do with variables with implicitly determined sharing attributes. */ enum omp_clause_default_kind default_kind; /* Nesting depth of this context. Used to beautify error messages re invalid gotos. The outermost ctx is depth 1, with depth 0 being reserved for the main body of the function. */ int depth; /* True if this parallel directive is nested within another. */ bool is_nested; } omp_context; struct omp_for_data_loop { tree v, n1, n2, step; enum tree_code cond_code; }; /* A structure describing the main elements of a parallel loop. 
*/

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


/* Map of all OMP contexts created during scanning -- presumably keyed by
   the originating gimple statement; confirm against new/delete_omp_context
   (defined outside this chunk).  */
static splay_tree all_contexts;

/* Current nesting depth of parallel/task ("taskreg") constructs while
   scanning -- TODO(review): confirm against scan_omp_parallel/task.  */
static int taskreg_nesting_level;

struct omp_region *root_omp_region;

/* DECL_UIDs of variables shared with task constructs whose address gets
   taken (see use_pointer_for_field), so their uses may need
   regimplification.  */
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

/* Case labels shared by gimple walk callbacks: for these container
   statements, request that the walker descend into their sub-statements.  */
#define WALK_SUBSTMTS \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.
*/ static inline bool is_taskreg_ctx (omp_context *ctx) { return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK; } /* Return true if REGION is a combined parallel+workshare region. */ static inline bool is_combined_parallel (struct omp_region *region) { return region->is_combined_parallel; } /* Extract the header elements of parallel loop FOR_STMT and store them into *FD. */ static void extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd, struct omp_for_data_loop *loops) { tree t, var, *collapse_iter, *collapse_count; tree count = NULL_TREE, iter_type = long_integer_type_node; struct omp_for_data_loop *loop; int i; struct omp_for_data_loop dummy_loop; location_t loc = gimple_location (for_stmt); fd->for_stmt = for_stmt; fd->pre = NULL; fd->collapse = gimple_omp_for_collapse (for_stmt); if (fd->collapse > 1) fd->loops = loops; else fd->loops = &fd->loop; fd->have_nowait = fd->have_ordered = false; fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC; fd->chunk_size = NULL_TREE; collapse_iter = NULL; collapse_count = NULL; for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t)) switch (OMP_CLAUSE_CODE (t)) { case OMP_CLAUSE_NOWAIT: fd->have_nowait = true; break; case OMP_CLAUSE_ORDERED: fd->have_ordered = true; break; case OMP_CLAUSE_SCHEDULE: fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t); fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t); break; case OMP_CLAUSE_COLLAPSE: if (fd->collapse > 1) { collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t); collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t); } default: break; } /* FIXME: for now map schedule(auto) to schedule(static). There should be analysis to determine whether all iterations are approximately the same amount of work (then schedule(static) is best) or if it varies (then schedule(dynamic,N) is better). 
*/ if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO) { fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC; gcc_assert (fd->chunk_size == NULL); } gcc_assert (fd->collapse == 1 || collapse_iter != NULL); if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME) gcc_assert (fd->chunk_size == NULL); else if (fd->chunk_size == NULL) { /* We only need to compute a default chunk size for ordered static loops and dynamic loops. */ if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered || fd->collapse > 1) fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC) ? integer_zero_node : integer_one_node; } for (i = 0; i < fd->collapse; i++) { if (fd->collapse == 1) loop = &fd->loop; else if (loops != NULL) loop = loops + i; else loop = &dummy_loop; loop->v = gimple_omp_for_index (for_stmt, i); gcc_assert (SSA_VAR_P (loop->v)); gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE); var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v; loop->n1 = gimple_omp_for_initial (for_stmt, i); loop->cond_code = gimple_omp_for_cond (for_stmt, i); loop->n2 = gimple_omp_for_final (for_stmt, i); switch (loop->cond_code) { case LT_EXPR: case GT_EXPR: break; case LE_EXPR: if (POINTER_TYPE_P (TREE_TYPE (loop->n2))) loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1); else loop->n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2, build_int_cst (TREE_TYPE (loop->n2), 1)); loop->cond_code = LT_EXPR; break; case GE_EXPR: if (POINTER_TYPE_P (TREE_TYPE (loop->n2))) loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1); else loop->n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2, build_int_cst (TREE_TYPE (loop->n2), 1)); loop->cond_code = GT_EXPR; break; default: gcc_unreachable (); } t = gimple_omp_for_incr (for_stmt, i); gcc_assert (TREE_OPERAND (t, 0) == var); switch (TREE_CODE (t)) { case PLUS_EXPR: case POINTER_PLUS_EXPR: loop->step = TREE_OPERAND (t, 1); 
break; case MINUS_EXPR: loop->step = TREE_OPERAND (t, 1); loop->step = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (loop->step), loop->step); break; default: gcc_unreachable (); } if (iter_type != long_long_unsigned_type_node) { if (POINTER_TYPE_P (TREE_TYPE (loop->v))) iter_type = long_long_unsigned_type_node; else if (TYPE_UNSIGNED (TREE_TYPE (loop->v)) && TYPE_PRECISION (TREE_TYPE (loop->v)) >= TYPE_PRECISION (iter_type)) { tree n; if (loop->cond_code == LT_EXPR) n = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v), loop->n2, loop->step); else n = loop->n1; if (TREE_CODE (n) != INTEGER_CST || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n)) iter_type = long_long_unsigned_type_node; } else if (TYPE_PRECISION (TREE_TYPE (loop->v)) > TYPE_PRECISION (iter_type)) { tree n1, n2; if (loop->cond_code == LT_EXPR) { n1 = loop->n1; n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v), loop->n2, loop->step); } else { n1 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (loop->v), loop->n2, loop->step); n2 = loop->n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1) || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type))) iter_type = long_long_unsigned_type_node; } } if (collapse_count && *collapse_count == NULL) { if ((i == 0 || count != NULL_TREE) && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE && TREE_CONSTANT (loop->n1) && TREE_CONSTANT (loop->n2) && TREE_CODE (loop->step) == INTEGER_CST) { tree itype = TREE_TYPE (loop->v); if (POINTER_TYPE_P (itype)) itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0); t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? 
-1 : 1)); t = fold_build2_loc (loc, PLUS_EXPR, itype, fold_convert_loc (loc, itype, loop->step), t); t = fold_build2_loc (loc, PLUS_EXPR, itype, t, fold_convert_loc (loc, itype, loop->n2)); t = fold_build2_loc (loc, MINUS_EXPR, itype, t, fold_convert_loc (loc, itype, loop->n1)); if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR) t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, fold_build1_loc (loc, NEGATE_EXPR, itype, t), fold_build1_loc (loc, NEGATE_EXPR, itype, fold_convert_loc (loc, itype, loop->step))); else t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t, fold_convert_loc (loc, itype, loop->step)); t = fold_convert_loc (loc, long_long_unsigned_type_node, t); if (count != NULL_TREE) count = fold_build2_loc (loc, MULT_EXPR, long_long_unsigned_type_node, count, t); else count = t; if (TREE_CODE (count) != INTEGER_CST) count = NULL_TREE; } else count = NULL_TREE; } } if (count) { if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node))) iter_type = long_long_unsigned_type_node; else iter_type = long_integer_type_node; } else if (collapse_iter && *collapse_iter != NULL) iter_type = TREE_TYPE (*collapse_iter); fd->iter_type = iter_type; if (collapse_iter && *collapse_iter == NULL) *collapse_iter = create_tmp_var (iter_type, ".iter"); if (collapse_count && *collapse_count == NULL) { if (count) *collapse_count = fold_convert_loc (loc, iter_type, count); else *collapse_count = create_tmp_var (iter_type, ".count"); } if (fd->collapse > 1) { fd->loop.v = *collapse_iter; fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0); fd->loop.n2 = *collapse_count; fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1); fd->loop.cond_code = LT_EXPR; } } /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB is the immediate dominator of PAR_ENTRY_BB, return true if there are no data dependencies that would prevent expanding the parallel directive at PAR_ENTRY_BB as a combined parallel+workshare region. 
When expanding a combined parallel+workshare region, the call to the child function may need additional arguments in the case of GIMPLE_OMP_FOR regions. In some cases, these arguments are computed out of variables passed in from the parent to the child via 'struct .omp_data_s'. For instance: #pragma omp parallel for schedule (guided, i * 4) for (j ...) Is lowered into: # BLOCK 2 (PAR_ENTRY_BB) .omp_data_o.i = i; #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598) # BLOCK 3 (WS_ENTRY_BB) .omp_data_i = &.omp_data_o; D.1667 = .omp_data_i->i; D.1598 = D.1667 * 4; #pragma omp for schedule (guided, D.1598) When we outline the parallel region, the call to the child function 'bar.omp_fn.0' will need the value D.1598 in its argument list, but that value is computed *after* the call site. So, in principle we cannot do the transformation. To see whether the code in WS_ENTRY_BB blocks the combined parallel+workshare call, we collect all the variables used in the GIMPLE_OMP_FOR header check whether they appear on the LHS of any statement in WS_ENTRY_BB. If so, then we cannot emit the combined call. FIXME. If we had the SSA form built at this point, we could merely hoist the code in block 3 into block 2 and be done with it. But at this point we don't have dataflow information and though we could hack something up here, it is really not worth the aggravation. */ static bool workshare_safe_to_combine_p (basic_block ws_entry_bb) { struct omp_for_data fd; gimple ws_stmt = last_stmt (ws_entry_bb); if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS) return true; gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR); extract_omp_for_data (ws_stmt, &fd, NULL); if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST) return false; if (fd.iter_type != long_integer_type_node) return false; /* FIXME. We give up too easily here. 
If any of these arguments are not constants, they will likely involve variables that have been mapped into fields of .omp_data_s for sharing with the child function. With appropriate data flow, it would be possible to see through this. */ if (!is_gimple_min_invariant (fd.loop.n1) || !is_gimple_min_invariant (fd.loop.n2) || !is_gimple_min_invariant (fd.loop.step) || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size))) return false; return true; } /* Collect additional arguments needed to emit a combined parallel+workshare call. WS_STMT is the workshare directive being expanded. */ static VEC(tree,gc) * get_ws_args_for (gimple ws_stmt) { tree t; location_t loc = gimple_location (ws_stmt); VEC(tree,gc) *ws_args; if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR) { struct omp_for_data fd; extract_omp_for_data (ws_stmt, &fd, NULL); ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0)); t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1); VEC_quick_push (tree, ws_args, t); t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2); VEC_quick_push (tree, ws_args, t); t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step); VEC_quick_push (tree, ws_args, t); if (fd.chunk_size) { t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size); VEC_quick_push (tree, ws_args, t); } return ws_args; } else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS) { /* Number of sections is equal to the number of edges from the GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to the exit of the sections region. */ basic_block bb = single_succ (gimple_bb (ws_stmt)); t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1); ws_args = VEC_alloc (tree, gc, 1); VEC_quick_push (tree, ws_args, t); return ws_args; } gcc_unreachable (); } /* Discover whether REGION is a combined parallel+workshare region. 
*/ static void determine_parallel_type (struct omp_region *region) { basic_block par_entry_bb, par_exit_bb; basic_block ws_entry_bb, ws_exit_bb; if (region == NULL || region->inner == NULL || region->exit == NULL || region->inner->exit == NULL || region->inner->cont == NULL) return; /* We only support parallel+for and parallel+sections. */ if (region->type != GIMPLE_OMP_PARALLEL || (region->inner->type != GIMPLE_OMP_FOR && region->inner->type != GIMPLE_OMP_SECTIONS)) return; /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and WS_EXIT_BB -> PAR_EXIT_BB. */ par_entry_bb = region->entry; par_exit_bb = region->exit; ws_entry_bb = region->inner->entry; ws_exit_bb = region->inner->exit; if (single_succ (par_entry_bb) == ws_entry_bb && single_succ (ws_exit_bb) == par_exit_bb && workshare_safe_to_combine_p (ws_entry_bb) && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb)) || (last_and_only_stmt (ws_entry_bb) && last_and_only_stmt (par_exit_bb)))) { gimple ws_stmt = last_stmt (ws_entry_bb); if (region->inner->type == GIMPLE_OMP_FOR) { /* If this is a combined parallel loop, we need to determine whether or not to use the combined library calls. There are two cases where we do not apply the transformation: static loops and any kind of ordered loop. In the first case, we already open code the loop so there is no need to do anything else. In the latter case, the combined parallel loop call would still need extra synchronization to implement ordered semantics, so there would not be any gain in using the combined call. 
*/ tree clauses = gimple_omp_for_clauses (ws_stmt); tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE); if (c == NULL || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC || find_omp_clause (clauses, OMP_CLAUSE_ORDERED)) { region->is_combined_parallel = false; region->inner->is_combined_parallel = false; return; } } region->is_combined_parallel = true; region->inner->is_combined_parallel = true; region->ws_args = get_ws_args_for (ws_stmt); } } /* Return true if EXPR is variable sized. */ static inline bool is_variable_sized (const_tree expr) { return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr))); } /* Return true if DECL is a reference type. */ static inline bool is_reference (tree decl) { return lang_hooks.decls.omp_privatize_by_reference (decl); } /* Lookup variables in the decl or field splay trees. The "maybe" form allows for the variable form to not have been entered, otherwise we assert that the variable must have been entered. */ static inline tree lookup_decl (tree var, omp_context *ctx) { tree *n; n = (tree *) pointer_map_contains (ctx->cb.decl_map, var); return *n; } static inline tree maybe_lookup_decl (const_tree var, omp_context *ctx) { tree *n; n = (tree *) pointer_map_contains (ctx->cb.decl_map, var); return n ? *n : NULL_TREE; } static inline tree lookup_field (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var); return (tree) n->value; } static inline tree lookup_sfield (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->sfield_map ? ctx->sfield_map : ctx->field_map, (splay_tree_key) var); return (tree) n->value; } static inline tree maybe_lookup_field (tree var, omp_context *ctx) { splay_tree_node n; n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var); return n ? (tree) n->value : NULL_TREE; } /* Return true if DECL should be copied by pointer. SHARED_CTX is the parallel context if DECL is to be shared. 
*/ static bool use_pointer_for_field (tree decl, omp_context *shared_ctx) { if (AGGREGATE_TYPE_P (TREE_TYPE (decl))) return true; /* We can only use copy-in/copy-out semantics for shared variables when we know the value is not accessible from an outer scope. */ if (shared_ctx) { /* ??? Trivially accessible from anywhere. But why would we even be passing an address in this case? Should we simply assert this to be false, or should we have a cleanup pass that removes these from the list of mappings? */ if (TREE_STATIC (decl) || DECL_EXTERNAL (decl)) return true; /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell without analyzing the expression whether or not its location is accessible to anyone else. In the case of nested parallel regions it certainly may be. */ if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl)) return true; /* Do not use copy-in/copy-out for variables that have their address taken. */ if (TREE_ADDRESSABLE (decl)) return true; /* lower_send_shared_vars only uses copy-in, but not copy-out for these. */ if (TREE_READONLY (decl) || ((TREE_CODE (decl) == RESULT_DECL || TREE_CODE (decl) == PARM_DECL) && DECL_BY_REFERENCE (decl))) return false; /* Disallow copy-in/out in nested parallel if decl is shared in outer parallel, otherwise each thread could store the shared variable in its own copy-in location, making the variable no longer really shared. */ if (shared_ctx->is_nested) { omp_context *up; for (up = shared_ctx->outer; up; up = up->outer) if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up)) break; if (up) { tree c; for (c = gimple_omp_taskreg_clauses (up->stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED && OMP_CLAUSE_DECL (c) == decl) break; if (c) goto maybe_mark_addressable_and_ret; } } /* For tasks avoid using copy-in/out. As tasks can be deferred or executed in different thread, when GOMP_task returns, the task hasn't necessarily terminated. 
*/ if (is_task_ctx (shared_ctx)) { tree outer; maybe_mark_addressable_and_ret: outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx); if (is_gimple_reg (outer)) { /* Taking address of OUTER in lower_send_shared_vars might need regimplification of everything that uses the variable. */ if (!task_shared_vars) task_shared_vars = BITMAP_ALLOC (NULL); bitmap_set_bit (task_shared_vars, DECL_UID (outer)); TREE_ADDRESSABLE (outer) = 1; } return true; } } return false; } /* Create a new VAR_DECL and copy information from VAR to it. */ tree copy_var_decl (tree var, tree name, tree type) { tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type); TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var); TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var); DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var); DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var); DECL_IGNORED_P (copy) = DECL_IGNORED_P (var); DECL_CONTEXT (copy) = DECL_CONTEXT (var); TREE_USED (copy) = 1; DECL_SEEN_IN_BIND_EXPR_P (copy) = 1; return copy; } /* Construct a new automatic decl similar to VAR. */ static tree omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx) { tree copy = copy_var_decl (var, name, type); DECL_CONTEXT (copy) = current_function_decl; DECL_CHAIN (copy) = ctx->block_vars; ctx->block_vars = copy; return copy; } static tree omp_copy_decl_1 (tree var, omp_context *ctx) { return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx); } /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it as appropriate. */ static tree omp_build_component_ref (tree obj, tree field) { tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL); if (TREE_THIS_VOLATILE (field)) TREE_THIS_VOLATILE (ret) |= 1; if (TREE_READONLY (field)) TREE_READONLY (ret) |= 1; return ret; } /* Build tree nodes to access the field for VAR on the receiver side. 
*/ static tree build_receiver_ref (tree var, bool by_ref, omp_context *ctx) { tree x, field = lookup_field (var, ctx); /* If the receiver record type was remapped in the child function, remap the field into the new record type. */ x = maybe_lookup_field (field, ctx); if (x != NULL) field = x; x = build_simple_mem_ref (ctx->receiver_decl); x = omp_build_component_ref (x, field); if (by_ref) x = build_simple_mem_ref (x); return x; } /* Build tree nodes to access VAR in the scope outer to CTX. In the case of a parallel, this is a component reference; for workshare constructs this is some variable. */ static tree build_outer_var_ref (tree var, omp_context *ctx) { tree x; if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))) x = var; else if (is_variable_sized (var)) { x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0); x = build_outer_var_ref (x, ctx); x = build_simple_mem_ref (x); } else if (is_taskreg_ctx (ctx)) { bool by_ref = use_pointer_for_field (var, NULL); x = build_receiver_ref (var, by_ref, ctx); } else if (ctx->outer) x = lookup_decl (var, ctx->outer); else if (is_reference (var)) /* This can happen with orphaned constructs. If var is reference, it is possible it is shared and as such valid. */ x = var; else gcc_unreachable (); if (is_reference (var)) x = build_simple_mem_ref (x); return x; } /* Build tree nodes to access the field for VAR on the sender side. */ static tree build_sender_ref (tree var, omp_context *ctx) { tree field = lookup_sfield (var, ctx); return omp_build_component_ref (ctx->sender_decl, field); } /* Add a new field for VAR inside the structure CTX->SENDER_DECL. 
 */

/* MASK bit 1 requests a field in CTX->record_type (receiver side,
   tracked in CTX->field_map); bit 2 requests a field in
   CTX->srecord_type (sender side, tracked in CTX->sfield_map).
   BY_REF makes the field a pointer to VAR's type.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  /* Each map may receive VAR at most once.  */
  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      /* Field wanted on both sides: insert into the receiver record,
         and mirror into the sender record if one exists.  */
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          /* Lazily create the sender record, seeding it with copies of
             all fields already present in the receiver record.  */
          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}

/* Create a private copy of VAR inside CTX and record the VAR -> copy
   mapping in the context's copy-body data.  */

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      /* Remap variable sizes; fall back to the remapped type's size
         if remapping produced an error.  */
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.
 */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  /* CB is the first member of omp_context, so this cast recovers the
     enclosing context.  */
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  /* Walk out to the nearest parallel/task context, returning any
     mapping found on the way.  */
  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION to FILE, indenting
   nested regions by 4 columns.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree rooted at
   ROOT_OMP_REGION.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* Create a new context, with OUTER_CTX being the surrounding context.
   The context is registered in ALL_CONTEXTS keyed by STMT; nested
   contexts inherit the copy-body data of their parent.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      /* Outermost context: copying is within the current function.  */
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn: gimplify its body, wrap it for EH if needed,
   and register the function with the callgraph.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  /* Temporarily switch to the child function to gimplify its body;
     both CFUN and current_function_decl are restored afterward.  */
  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          DECL_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  First pass installs fields and local copies;
   second pass fixes up remapped decls; a final pass scans reduction
   placeholders and lastprivate sequences.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_taskreg_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          /* Clause operands are evaluated in the enclosing context.  */
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_MERGEABLE:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_MERGEABLE:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  return (clone_function_name (current_function_decl,
                               task_copy ? "_omp_cpyfn" : "_omp_fn"));
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.
 */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  /* A task copy function takes (dest, src) pointers; a regular child
     function takes only the .omp_data_i pointer.  */
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
                                    ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
                     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  /* The child function is a compiler-generated, non-public,
     non-inlinable function with local linkage.  */
  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_NAMELESS (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
                      PARM_DECL, get_identifier (".omp_data_o"),
                      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}

/* Scan an OpenMP parallel directive.
 */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
                          OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  /* Build the .omp_data_s record type that carries shared data into
     the child function.  */
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  /* A sender record was created (by install_var_field) iff the task
     needs a separate copy function.  */
  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
                         TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
        if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
            || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
          {
            *q = *p;
            *p = TREE_CHAIN (*p);
            TREE_CHAIN (*q) = NULL_TREE;
            q = &TREE_CHAIN (*q);
          }
        else
          p = &DECL_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
        layout_type (ctx->srecord_type);
      /* Record the data block size and alignment on the task stmt for
         the runtime call.  */
      t = fold_convert_loc (loc, long_integer_type_node,
                        TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
                         TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  /* Scan index/bounds/step for each collapsed loop level.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.
 */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  /* Record type for the copyprivate data block.  */
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Check OpenMP nesting restrictions.  Walks outward through CTX
   looking for illegal enclosing regions; emits an error and returns
   false on a violation, true otherwise.  A GIMPLE_CALL here is a
   GOMP_barrier call (see scan_omp_1_stmt).  */

static bool
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_ORDERED:
          case GIMPLE_OMP_MASTER:
          case GIMPLE_OMP_TASK:
            if (is_gimple_call (stmt))
              {
                error_at (gimple_location (stmt),
                          "barrier region may not be closely nested inside "
                          "of work-sharing, critical, ordered, master or "
                          "explicit task region");
                return false;
              }
            error_at (gimple_location (stmt),
                      "work-sharing region may not be closely nested inside "
                      "of work-sharing, critical, ordered, master or explicit "
                      "task region");
            return false;
          case GIMPLE_OMP_PARALLEL:
            return true;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_TASK:
            error_at (gimple_location (stmt),
                      "master region may not be closely nested inside "
                      "of work-sharing or explicit task region");
            return false;
          case GIMPLE_OMP_PARALLEL:
            return true;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_CRITICAL:
          case GIMPLE_OMP_TASK:
            error_at (gimple_location (stmt),
                      "ordered region may not be closely nested inside "
                      "of critical or explicit task region");
            return false;
          case GIMPLE_OMP_FOR:
            if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
                                 OMP_CLAUSE_ORDERED) == NULL)
              {
                error_at (gimple_location (stmt),
                          "ordered region must be closely nested inside "
                          "a loop region with an ordered clause");
                return false;
              }
            return true;
          case GIMPLE_OMP_PARALLEL:
            return true;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
        if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
            && (gimple_omp_critical_name (stmt)
                == gimple_omp_critical_name (ctx->stmt)))
          {
            error_at (gimple_location (stmt),
                      "critical region may not be nested inside a critical "
                      "region with the same name");
            return false;
          }
      break;
    default:
      break;
    }
  return true;
}

/* Helper function scan_omp.  Callback for walk_tree or operators in
   walk_gimple_stmt used to scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
        *tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
        *tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
        {
          *walk_subtrees = 1;
          if (ctx)
            {
              /* If the node's type got remapped, rebuild constants with
                 the new type; mutate the type in place otherwise.  */
              tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
              if (tem != TREE_TYPE (t))
                {
                  if (TREE_CODE (t) == INTEGER_CST)
                    *tp = build_int_cst_wide (tem,
                                              TREE_INT_CST_LOW (t),
                                              TREE_INT_CST_HIGH (t));
                  else
                    TREE_TYPE (t) = tem;
                }
            }
        }
      break;
    }

  return NULL_TREE;
}

/* Helper function for scan_omp.  Callback for walk_gimple_stmt used to
   scan for OpenMP directives in the current statement in GSI.
 */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      bool remove = false;
      if (is_gimple_omp (stmt))
        remove = !check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
        {
          /* Only GOMP_barrier calls are subject to nesting checks.  */
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
            remove = !check_omp_nesting_restrictions (stmt, ctx);
        }
      if (remove)
        {
          /* Illegally nested directive: replace it with a nop so the
             rest of the function can still be compiled.  */
          stmt = gimple_build_nop ();
          gsi_replace (gsi, stmt, false);
        }
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* These need a context for nesting checks but no data-sharing
         records.  */
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
        tree var;

        *handled_ops_p = false;
        if (ctx)
          /* Bind-local vars map to themselves so remapping leaves
             them alone.  */
          for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
            insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}

/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.
 */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  /* The walk updates input_location per-statement; restore it when
     done.  */
  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

	    ** This is a problem.
   The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	  .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  /* In a nested context a non-global DECL must have a mapping
     somewhere in the chain.  */
  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts (no assertion is made).  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}

/* Construct the initialization value for reduction CLAUSE.
 */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    /* Identity 0 for additive / disjunctive operators.  */
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    /* Identity 1 for multiplicative / conjunctive operators.  */
    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    /* All-ones identity for bitwise AND.  */
    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      /* Identity is the smallest representable value: -inf (or the
         most negative finite value when infinities are not honored)
         for floats, TYPE_MIN_VALUE for integers.  */
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      /* Identity is the largest representable value.  */
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}

/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for
   REFERENCE_TYPE private variables.  Initialization statements go in
   ILIST, while calls to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
                         omp_context *ctx)
{
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.
*/ for (pass = 0; pass < 2; ++pass) { for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); tree var, new_var; bool by_ref; location_t clause_loc = OMP_CLAUSE_LOCATION (c); switch (c_kind) { case OMP_CLAUSE_PRIVATE: if (OMP_CLAUSE_PRIVATE_DEBUG (c)) continue; break; case OMP_CLAUSE_SHARED: if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL) { gcc_assert (is_global_var (OMP_CLAUSE_DECL (c))); continue; } case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_REDUCTION: break; case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) { lastprivate_firstprivate = true; if (pass != 0) continue; } break; default: continue; } new_var = var = OMP_CLAUSE_DECL (c); if (c_kind != OMP_CLAUSE_COPYIN) new_var = lookup_decl (var, ctx); if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN) { if (pass != 0) continue; } else if (is_variable_sized (var)) { /* For variable sized types, we need to allocate the actual storage here. Call alloca and store the result in the pointer decl that we created elsewhere. */ if (pass == 0) continue; if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx)) { gimple stmt; tree tmp, atmp; ptr = DECL_VALUE_EXPR (new_var); gcc_assert (TREE_CODE (ptr) == INDIRECT_REF); ptr = TREE_OPERAND (ptr, 0); gcc_assert (DECL_P (ptr)); x = TYPE_SIZE_UNIT (TREE_TYPE (new_var)); /* void *tmp = __builtin_alloca */ atmp = builtin_decl_explicit (BUILT_IN_ALLOCA); stmt = gimple_build_call (atmp, 1, x); tmp = create_tmp_var_raw (ptr_type_node, NULL); gimple_add_tmp_var (tmp); gimple_call_set_lhs (stmt, tmp); gimple_seq_add_stmt (ilist, stmt); x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp); gimplify_assign (ptr, x, ilist); } } else if (is_reference (var)) { /* For references that are being privatized for Fortran, allocate new backing storage for the new pointer variable. 
This allows us to avoid changing all the code that expects a pointer to something that expects a direct variable. Note that this doesn't apply to C++, since reference types are disallowed in data sharing clauses there, except for NRV optimized return values. */ if (pass == 0) continue; x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var))); if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx)) { x = build_receiver_ref (var, false, ctx); x = build_fold_addr_expr_loc (clause_loc, x); } else if (TREE_CONSTANT (x)) { const char *name = NULL; if (DECL_NAME (var)) name = IDENTIFIER_POINTER (DECL_NAME (new_var)); x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)), name); gimple_add_tmp_var (x); TREE_ADDRESSABLE (x) = 1; x = build_fold_addr_expr_loc (clause_loc, x); } else { tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA); x = build_call_expr_loc (clause_loc, atmp, 1, x); } x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x); gimplify_assign (new_var, x, ilist); new_var = build_simple_mem_ref_loc (clause_loc, new_var); } else if (c_kind == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { if (pass == 0) continue; } else if (pass != 0) continue; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: /* Shared global vars are just accessed directly. */ if (is_global_var (new_var)) break; /* Set up the DECL_VALUE_EXPR for shared variables now. This needs to be delayed until after fixup_child_record_type so that we get the correct type during the dereference. */ by_ref = use_pointer_for_field (var, ctx); x = build_receiver_ref (var, by_ref, ctx); SET_DECL_VALUE_EXPR (new_var, x); DECL_HAS_VALUE_EXPR_P (new_var) = 1; /* ??? If VAR is not passed by reference, and the variable hasn't been initialized yet, then we'll get a warning for the store into the omp_data_s structure. Ideally, we'd be able to notice this and not store anything at all, but we're generating code too early. Suppress the warning. 
*/ if (!by_ref) TREE_NO_WARNING (var) = 1; break; case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) break; /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE) x = build_outer_var_ref (var, ctx); else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c)) { if (is_task_ctx (ctx)) x = build_receiver_ref (var, false, ctx); else x = build_outer_var_ref (var, ctx); } else x = NULL; x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x); if (x) gimplify_and_add (x, ilist); /* FALLTHRU */ do_dtor: x = lang_hooks.decls.omp_clause_dtor (c, new_var); if (x) { gimple_seq tseq = NULL; dtor = x; gimplify_stmt (&dtor, &tseq); gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT); } break; case OMP_CLAUSE_FIRSTPRIVATE: if (is_task_ctx (ctx)) { if (is_reference (var) || is_variable_sized (var)) goto do_dtor; else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)) || use_pointer_for_field (var, NULL)) { x = build_receiver_ref (var, false, ctx); SET_DECL_VALUE_EXPR (new_var, x); DECL_HAS_VALUE_EXPR_P (new_var) = 1; goto do_dtor; } } x = build_outer_var_ref (var, ctx); x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x); gimplify_and_add (x, ilist); goto do_dtor; break; case OMP_CLAUSE_COPYIN: by_ref = use_pointer_for_field (var, NULL); x = build_receiver_ref (var, by_ref, ctx); x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x); append_to_statement_list (x, &copyin_seq); copyin_by_ref |= by_ref; break; case OMP_CLAUSE_REDUCTION: if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c); x = build_outer_var_ref (var, ctx); if (is_reference (var)) x = build_fold_addr_expr_loc (clause_loc, x); SET_DECL_VALUE_EXPR (placeholder, x); DECL_HAS_VALUE_EXPR_P (placeholder) = 1; lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx); gimple_seq_add_seq (ilist, OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c)); OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL; DECL_HAS_VALUE_EXPR_P (placeholder) = 0; } 
else { x = omp_reduction_init (c, TREE_TYPE (new_var)); gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE); gimplify_assign (new_var, x, ilist); } break; default: gcc_unreachable (); } } } /* The copyin sequence is not to be executed by the main thread, since that would result in self-copies. Perhaps not visible to scalars, but it certainly is to C++ operator=. */ if (copyin_seq) { x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0); x = build2 (NE_EXPR, boolean_type_node, x, build_int_cst (TREE_TYPE (x), 0)); x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL); gimplify_and_add (x, ilist); } /* If any copyin variable is passed by reference, we must ensure the master thread doesn't modify it before it is copied over in all threads. Similarly for variables in both firstprivate and lastprivate clauses we need to ensure the lastprivate copying happens after firstprivate copying in all threads. */ if (copyin_by_ref || lastprivate_firstprivate) gimplify_and_add (build_omp_barrier (), ilist); } /* Generate code to implement the LASTPRIVATE clauses. This is used for both parallel and workshare constructs. PREDICATE may be NULL if it's always true. */ static void lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list, omp_context *ctx) { tree x, c, label = NULL; bool par_clauses = false; /* Early exit if there are no lastprivate clauses. */ clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE); if (clauses == NULL) { /* If this was a workshare clause, see if it had been combined with its parallel. In that case, look for the clauses on the parallel statement itself. 
*/ if (is_parallel_ctx (ctx)) return; ctx = ctx->outer; if (ctx == NULL || !is_parallel_ctx (ctx)) return; clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt), OMP_CLAUSE_LASTPRIVATE); if (clauses == NULL) return; par_clauses = true; } if (predicate) { gimple stmt; tree label_true, arm1, arm2; label = create_artificial_label (UNKNOWN_LOCATION); label_true = create_artificial_label (UNKNOWN_LOCATION); arm1 = TREE_OPERAND (predicate, 0); arm2 = TREE_OPERAND (predicate, 1); gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue); gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue); stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2, label_true, label); gimple_seq_add_stmt (stmt_list, stmt); gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true)); } for (c = clauses; c ;) { tree var, new_var; location_t clause_loc = OMP_CLAUSE_LOCATION (c); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE) { var = OMP_CLAUSE_DECL (c); new_var = lookup_decl (var, ctx); if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)) { lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx); gimple_seq_add_seq (stmt_list, OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)); } OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL; x = build_outer_var_ref (var, ctx); if (is_reference (var)) new_var = build_simple_mem_ref_loc (clause_loc, new_var); x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var); gimplify_and_add (x, stmt_list); } c = OMP_CLAUSE_CHAIN (c); if (c == NULL && !par_clauses) { /* If this was a workshare clause, see if it had been combined with its parallel. In that case, continue looking for the clauses also on the parallel statement itself. 
*/ if (is_parallel_ctx (ctx)) break; ctx = ctx->outer; if (ctx == NULL || !is_parallel_ctx (ctx)) break; c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt), OMP_CLAUSE_LASTPRIVATE); par_clauses = true; } } if (label) gimple_seq_add_stmt (stmt_list, gimple_build_label (label)); } /* Generate code to implement the REDUCTION clauses. */ static void lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx) { gimple_seq sub_seq = NULL; gimple stmt; tree x, c; int count = 0; /* First see if there is exactly one reduction clause. Use OMP_ATOMIC update in that case, otherwise use a lock. */ for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) { if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { /* Never use OMP_ATOMIC for array reductions. */ count = -1; break; } count++; } if (count == 0) return; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { tree var, ref, new_var; enum tree_code code; location_t clause_loc = OMP_CLAUSE_LOCATION (c); if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION) continue; var = OMP_CLAUSE_DECL (c); new_var = lookup_decl (var, ctx); if (is_reference (var)) new_var = build_simple_mem_ref_loc (clause_loc, new_var); ref = build_outer_var_ref (var, ctx); code = OMP_CLAUSE_REDUCTION_CODE (c); /* reduction(-:var) sums up the partial results, so it acts identically to reduction(+:var). 
*/
      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  /* Exactly one scalar reduction: emit a single OMP_ATOMIC update
	     of the shared variable instead of taking the global lock.  */
	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  gimplify_and_add (x, stmt_seqp);
	  return;
	}

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  /* User-defined reduction: bind the placeholder to the outer
	     variable and splice in the pre-built merge sequence.  */
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (is_reference (var))
	    ref = build_fold_addr_expr_loc (clause_loc, ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  /* Built-in operator: outer-var = outer-var CODE private-var.  */
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  gimplify_assign (ref, x, &sub_seq);
	}
    }

  /* More than one reduction (or an array reduction): protect all the
     accumulated merge statements with GOMP_atomic_start/end.  */
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}

/* Generate code to implement the COPYPRIVATE clauses.
*/

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      /* Sender side (SLIST): store the value, or its address when the
	 field is by reference, into the copyprivate record field.  */
      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
	{
	  x = build_fold_addr_expr_loc (clause_loc, new_var);
	  x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
	}
      gimplify_assign (ref, x, slist);

      /* Receiver side (RLIST): read the broadcast value back out of the
	 field, dereferencing for by-ref fields and REFERENCE_TYPE vars.  */
      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
	{
	  ref = fold_convert_loc (clause_loc,
				  build_pointer_type (TREE_TYPE (new_var)),
				  ref);
	  ref = build_fold_indirect_ref_loc (clause_loc, ref);
	}
      if (is_reference (var))
	{
	  ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
	  ref = build_simple_mem_ref_loc (clause_loc, ref);
	  new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	}
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}


/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN,
   LASTPRIVATE, and REDUCTION from the sender (aka parent) side.
*/

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  /* Only private clauses that reference the outer variable need
	     anything sent in.  */
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    break;
	  continue;
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      /* Globals are accessed directly (except for COPYIN, which still
	 goes through the record); variable sized decls are handled on
	 the receiver side.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var))
	continue;
      if (is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      /* Decide whether the value flows into the region (DO_IN), out of
	 it (DO_OUT), or both.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    {
	      do_out = true;
	      if (lang_hooks.decls.omp_private_outer_ref (val))
		do_in = true;
	    }
	  break;

	case OMP_CLAUSE_REDUCTION:
	  do_in = true;
	  do_out = !(by_ref || is_reference (val));
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
	  gimplify_assign (ref, x, ilist);
	  if (is_task_ctx (ctx))
	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  gimplify_assign (var, ref, olist);
	}
    }
}

/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ?
		ctx->srecord_type : ctx->record_type;

  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive.  Find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
	{
	  /* Shared by address: store &var into the sender record.  */
	  x = build_sender_ref (ovar, ctx);
	  var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	}
      else
	{
	  /* Shared by value: copy in before the region ...  */
	  x = build_sender_ref (ovar, ctx);
	  gimplify_assign (x, var, ilist);

	  if (!TREE_READONLY (var)
	      /* We don't need to receive a new reference to a result
	         or parm decl.  In fact we may not store to it as we will
		 invalidate any pending RSO and generate wrong gimple
		 during inlining.  */
	      && !((TREE_CODE (var) == RESULT_DECL
		    || TREE_CODE (var) == PARM_DECL)
		   && DECL_BY_REFERENCE (var)))
	    {
	      /* ... and copy back out after it.  */
	      x = build_sender_ref (ovar, ctx);
	      gimplify_assign (var, x, olist);
	    }
	}
    }
}

/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}


/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.
*/ static void expand_parallel_call (struct omp_region *region, basic_block bb, gimple entry_stmt, VEC(tree,gc) *ws_args) { tree t, t1, t2, val, cond, c, clauses; gimple_stmt_iterator gsi; gimple stmt; enum built_in_function start_ix; int start_ix2; location_t clause_loc; VEC(tree,gc) *args; clauses = gimple_omp_parallel_clauses (entry_stmt); /* Determine what flavor of GOMP_parallel_start we will be emitting. */ start_ix = BUILT_IN_GOMP_PARALLEL_START; if (is_combined_parallel (region)) { switch (region->inner->type) { case GIMPLE_OMP_FOR: gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO); start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START + (region->inner->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME ? 3 : region->inner->sched_kind)); start_ix = (enum built_in_function)start_ix2; break; case GIMPLE_OMP_SECTIONS: start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START; break; default: gcc_unreachable (); } } /* By default, the value of NUM_THREADS is zero (selected at run time) and there is no conditional. */ cond = NULL_TREE; val = build_int_cst (unsigned_type_node, 0); c = find_omp_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS); if (c) { val = OMP_CLAUSE_NUM_THREADS_EXPR (c); clause_loc = OMP_CLAUSE_LOCATION (c); } else clause_loc = gimple_location (entry_stmt); /* Ensure 'val' is of the correct type. */ val = fold_convert_loc (clause_loc, unsigned_type_node, val); /* If we found the clause 'if (cond)', build either (cond != 0) or (cond ? val : 1u). 
*/ if (cond) { gimple_stmt_iterator gsi; cond = gimple_boolify (cond); if (integer_zerop (val)) val = fold_build2_loc (clause_loc, EQ_EXPR, unsigned_type_node, cond, build_int_cst (TREE_TYPE (cond), 0)); else { basic_block cond_bb, then_bb, else_bb; edge e, e_then, e_else; tree tmp_then, tmp_else, tmp_join, tmp_var; tmp_var = create_tmp_var (TREE_TYPE (val), NULL); if (gimple_in_ssa_p (cfun)) { tmp_then = make_ssa_name (tmp_var, NULL); tmp_else = make_ssa_name (tmp_var, NULL); tmp_join = make_ssa_name (tmp_var, NULL); } else { tmp_then = tmp_var; tmp_else = tmp_var; tmp_join = tmp_var; } e = split_block (bb, NULL); cond_bb = e->src; bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb); stmt = gimple_build_cond_empty (cond); gsi = gsi_start_bb (cond_bb); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (then_bb); stmt = gimple_build_assign (tmp_then, val); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (else_bb); stmt = gimple_build_assign (tmp_else, build_int_cst (unsigned_type_node, 1)); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); e_then = make_edge (then_bb, bb, EDGE_FALLTHRU); e_else = make_edge (else_bb, bb, EDGE_FALLTHRU); if (gimple_in_ssa_p (cfun)) { gimple phi = create_phi_node (tmp_join, bb); SSA_NAME_DEF_STMT (tmp_join) = phi; add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION); add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION); } val = tmp_join; } gsi = gsi_start_bb (bb); val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } gsi = gsi_last_bb (bb); t = gimple_omp_parallel_data_arg (entry_stmt); if (t == NULL) t1 = null_pointer_node; else t1 = build_fold_addr_expr (t); t2 = build_fold_addr_expr 
(gimple_omp_parallel_child_fn (entry_stmt)); args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args)); VEC_quick_push (tree, args, t2); VEC_quick_push (tree, args, t1); VEC_quick_push (tree, args, val); VEC_splice (tree, args, ws_args); t = build_call_expr_loc_vec (UNKNOWN_LOCATION, builtin_decl_explicit (start_ix), args); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = gimple_omp_parallel_data_arg (entry_stmt); if (t == NULL) t = null_pointer_node; else t = build_fold_addr_expr (t); t = build_call_expr_loc (gimple_location (entry_stmt), gimple_omp_parallel_child_fn (entry_stmt), 1, t); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = build_call_expr_loc (gimple_location (entry_stmt), builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END), 0); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } /* Build the function call to GOMP_task to actually generate the task operation. BB is the block where to insert the code. */ static void expand_task_call (basic_block bb, gimple entry_stmt) { tree t, t1, t2, t3, flags, cond, c, c2, clauses; gimple_stmt_iterator gsi; location_t loc = gimple_location (entry_stmt); clauses = gimple_omp_task_clauses (entry_stmt); c = find_omp_clause (clauses, OMP_CLAUSE_IF); if (c) cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c)); else cond = boolean_true_node; c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED); c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE); flags = build_int_cst (unsigned_type_node, (c ? 1 : 0) + (c2 ? 
			     4 : 0));

  /* An OMP_CLAUSE_FINAL clause contributes bit 2 of FLAGS, selected at
     run time by evaluating its condition.  */
  c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
  if (c)
    {
      c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
      c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
			   build_int_cst (unsigned_type_node, 2),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  /* GOMP_task (fn, data, copy_fn, arg_size, arg_align, if_cond, flags).  */
  t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK), 7, t1, t2,
		       t3, gimple_omp_task_arg_size (entry_stmt),
		       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}


/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
   catch handler and return it.  This prevents programs from violating the
   structured block semantics with throws.  */

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  /* Use the language's cleanup-protection hook when available, otherwise
     fall back to __builtin_trap.  */
  if (lang_hooks.eh_protect_cleanup_actions != NULL)
    decl = lang_hooks.eh_protect_cleanup_actions ();
  else
    decl = builtin_decl_explicit (BUILT_IN_TRAP);

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
			GIMPLE_TRY_CATCH);

  return gimple_seq_alloc_with_stmt (g);
}

/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (VEC(tree,gc) *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  /* Walk V backwards so the resulting chain preserves V's order.  */
  FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}


/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.
Since the end of a parallel region is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be removed. */ static void remove_exit_barrier (struct omp_region *region) { gimple_stmt_iterator gsi; basic_block exit_bb; edge_iterator ei; edge e; gimple stmt; int any_addressable_vars = -1; exit_bb = region->exit; /* If the parallel region doesn't return, we don't have REGION->EXIT block at all. */ if (! exit_bb) return; /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of statements that can appear in between are extremely limited -- no memory operations at all. Here, we allow nothing at all, so the only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */ gsi = gsi_last_bb (exit_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); gsi_prev (&gsi); if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL) return; FOR_EACH_EDGE (e, ei, exit_bb->preds) { gsi = gsi_last_bb (e->src); if (gsi_end_p (gsi)) continue; stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_OMP_RETURN && !gimple_omp_return_nowait_p (stmt)) { /* OpenMP 3.0 tasks unfortunately prevent this optimization in many cases. If there could be tasks queued, the barrier might be needed to let the tasks run before some local variable of the parallel that the task uses as shared runs out of scope. The task can be spawned either from within current function (this would be easy to check) or from some function it calls and gets passed an address of such a variable. 
	     */
	  if (any_addressable_vars < 0)
	    {
	      /* Computed lazily, once per region: is there any addressable
		 local — in the child function or in any BLOCK enclosing
		 this GIMPLE_OMP_RETURN — that a queued task could still be
		 using when the barrier is skipped?  */
	      gimple parallel_stmt = last_stmt (region->entry);
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  /* Stop once we reach the block of the parallel itself.  */
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  /* No addressable locals in sight: the barrier cannot be needed
	     by queued tasks, so downgrade the return to nowait.  */
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}

/* Recursively apply remove_exit_barrier to REGION and all of its
   sub-regions (children via INNER, siblings via the NEXT chain).  */

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for task body, except
   that in untied task omp_get_thread_num () can change at any task
   scheduling point.
*/

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  /* In an untied task omp_get_thread_num () may change between task
     scheduling points, so it must not be treated as constant there.  */
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple call = gsi_stmt (gsi);
	tree decl;

	/* Only consider direct calls to undefined external public
	   functions — i.e. the real library entry points.  */
	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    /* The substitution is only safe when the call matches the
	       builtin's assembler name and signature exactly.  */
	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}

/* Expand the OpenMP parallel or task directive starting at REGION.
*/

/* Outline the body of an OMP parallel or task region (REGION) into a
   separate child function and replace the region in the parent with a
   call into the libgomp runtime that launches that child.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  tree save_current;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;
  VEC(tree,gc) *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has been already instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb = single_succ (entry_bb);
	  gimple_stmt_iterator gsi;
	  tree arg, narg;
	  gimple parcopy_stmt = NULL;

	  /* Scan the first body block for the .OMP_DATA_I copy; the
	     gcc_assert inside the loop guarantees it must be found.  */
	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      /* Identity copy can simply be dropped; otherwise rewrite
		 the RHS to refer to the child's own parameter.  */
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      /* If we are in ssa form, we must load the value from the default
		 definition of the argument.  That should not be defined now,
		 since the argument is not used uninitialized.  */
	      gcc_assert (gimple_default_def (cfun, arg) == NULL);
	      narg = make_ssa_name (arg, gimple_build_nop ());
	      set_default_def (arg, narg);
	      /* ?? Is setting the subcode really necessary ??  */
	      gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
	      gimple_assign_set_rhs1 (parcopy_stmt, narg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (TREE_CODE (t) == VAR_DECL
	    && TREE_STATIC (t)
	    && !DECL_EXTERNAL (t))
	  varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  /* Initialize SSA machinery in the child before the region is
	     moved in, so the moved statements land in an SSA context.  */
	  push_cfun (child_cfun);
	  init_tree_ssa (child_cfun);
	  init_ssa_operands ();
	  cfun->gimple_df->in_ssa_p = true;
	  pop_cfun ();
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.
	 Compacts the vector in place, keeping only decls whose context
	 is not the parent function.  */
      num = VEC_length (tree, child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = VEC_index (tree, child_cfun->local_decls, srcidx);
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    VEC_replace (tree, child_cfun->local_decls, dstidx, t);
	  dstidx++;
	}
      if (dstidx != num)
	VEC_truncate (tree, child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
	= cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      save_current = current_function_decl;
      current_function_decl = child_fn;
      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB (bb)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      current_function_decl = save_current;
      pop_cfun ();
    }

  /* Emit a library call to launch the children threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  update_ssa (TODO_update_ssa_only_virtuals);
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.
    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

      */

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gimple stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
	      || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  /* istart0/iend0 are passed by address to the GOMP_loop_* runtime
     calls, hence must be addressable.  */
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (istart0);
      add_referenced_var (iend0);
    }

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      /* collapsed loops need work for expansion in SSA form.  */
      gcc_assert (!gimple_in_ssa_p (cfun));
      /* Compute the per-loop trip counts count1..countN and fold them
	 into the single logical iteration count (see pseudocode above).  */
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
      for (i = 0; i < fd->collapse; i++)
	{
	  tree itype = TREE_TYPE (fd->loops[i].v);

	  if (POINTER_TYPE_P (itype))
	    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	  t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
				     ? -1 : 1));
	  t = fold_build2 (PLUS_EXPR, itype,
			   fold_convert (itype, fd->loops[i].step), t);
	  t = fold_build2 (PLUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n2));
	  t = fold_build2 (MINUS_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].n1));
	  if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2 (TRUNC_DIV_EXPR, itype,
			     fold_build1 (NEGATE_EXPR, itype, t),
			     fold_build1 (NEGATE_EXPR, itype,
					  fold_convert (itype,
							fd->loops[i].step)));
	  else
	    t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			     fold_convert (itype, fd->loops[i].step));
	  t = fold_convert (type, t);
	  if (TREE_CODE (t) == INTEGER_CST)
	    counts[i] = t;
	  else
	    {
	      counts[i] = create_tmp_var (type, ".count");
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    true, GSI_SAME_STMT);
	      stmt = gimple_build_assign (counts[i], t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	  if (SSA_VAR_P (fd->loop.n2))
	    {
	      if (i == 0)
		t = counts[0];
	      else
		{
		  t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
		  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
						true, GSI_SAME_STMT);
		}
	      stmt = gimple_build_assign (fd->loop.n2, t);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	}
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
	 GOMP_loop_foo_next.  */
      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      if (POINTER_TYPE_P (type)
	  && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
	{
	  /* Avoid casting pointers to integer of a different size.  */
	  tree itype
	    = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
	  t1 = fold_convert (fd->iter_type,
			     fold_convert (itype, fd->loop.n2));
	  t0 = fold_convert (fd->iter_type,
			     fold_convert (itype, fd->loop.n1));
	}
      else
	{
	  t1 = fold_convert (fd->iter_type, fd->loop.n2);
	  t0 = fold_convert (fd->iter_type, fd->loop.n1);
	}
      if (bias)
	{
	  t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
	  t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
	}
      if (fd->iter_type == long_integer_type_node)
	{
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (builtin_decl_explicit (start_fn),
				   6, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 5, t0, t1, t2, t3, t4);
	}
      else
	{
	  tree t5;
	  tree c_bool_type;
	  tree bfn_decl;

	  /* The GOMP_loop_ull_*start functions have additional boolean
	     argument, true for < loops and false for > loops.
	     In Fortran, the C bool type can be different from
	     boolean_type_node.  */
	  bfn_decl = builtin_decl_explicit (start_fn);
	  c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
	  t5 = build_int_cst (c_bool_type,
			      fd->loop.cond_code == LT_EXPR ? 1 : 0);
	  if (fd->chunk_size)
	    {
	      tree bfn_decl = builtin_decl_explicit (start_fn);
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 6, t5, t0, t1, t2, t3, t4);
	}
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
		     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
						      0), t);
  t = fold_convert (type, t);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
						      0), t);
  t = fold_convert (type, t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				   false, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    {
      /* Recover the individual loop control variables V1..VN from the
	 single logical iteration number (div/mod chain, see pseudocode
	 above the function).  */
      tree tem = create_tmp_var (type, ".tem");

      stmt = gimple_build_assign (tem, fd->loop.v);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      for (i = fd->collapse - 1; i >= 0; i--)
	{
	  tree vtype = TREE_TYPE (fd->loops[i].v), itype;
	  itype = vtype;
	  if (POINTER_TYPE_P (vtype))
	    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
	  t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
	  t = fold_convert (itype, t);
	  t = fold_build2 (MULT_EXPR, itype, t,
			   fold_convert (itype, fd->loops[i].step));
	  if (POINTER_TYPE_P (vtype))
	    t = fold_build_pointer_plus (fd->loops[i].n1, t);
	  else
	    t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
	  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  if (i != 0)
	    {
	      t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
	 loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (stmt);
      vback = gimple_omp_continue_control_def (stmt);

      if (POINTER_TYPE_P (type))
	t = fold_build_pointer_plus (vmain, fd->loop.step);
      else
	t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				    true, GSI_SAME_STMT);
      stmt = gimple_build_assign (vback, t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
      stmt = gimple_build_cond_empty (t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1)
	{
	  /* Build the odometer-style chain of blocks that steps the
	     inner control variables (L10/L11/L12 in the pseudocode).  */
	  basic_block last_bb, bb;

	  last_bb = cont_bb;
	  for (i = fd->collapse - 1; i >= 0; i--)
	    {
	      tree vtype = TREE_TYPE (fd->loops[i].v);

	      bb = create_empty_bb (last_bb);
	      gsi = gsi_start_bb (bb);

	      if (i < fd->collapse - 1)
		{
		  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
		  e->probability = REG_BR_PROB_BASE / 8;

		  t = fd->loops[i + 1].n1;
		  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
						false, GSI_CONTINUE_LINKING);
		  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		}
	      else
		collapse_bb = bb;

	      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

	      if (POINTER_TYPE_P (vtype))
		t = fold_build_pointer_plus (fd->loops[i].v,
					     fd->loops[i].step);
	      else
		t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
				 fd->loops[i].step);
	      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      stmt = gimple_build_assign (fd->loops[i].v, t);
	      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	      if (i > 0)
		{
		  t = fd->loops[i].n2;
		  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
						false, GSI_CONTINUE_LINKING);
		  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
				   fd->loops[i].v, t);
		  stmt = gimple_build_cond_empty (t);
		  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
		  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
		  e->probability = REG_BR_PROB_BASE * 7 / 8;
		}
	      else
		make_edge (bb, l1_bb, EDGE_FALLTHRU);
	      last_bb = bb;
	    }
	}

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
	t = fold_build2 (NE_EXPR, boolean_type_node,
			 t, build_int_cst (TREE_TYPE (t), 0));
      stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      /* The exit path now reaches L3 through L2 rather than directly
	 from CONT; move the phi arguments over before removing the
	 old edge.  */
      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
		   PHI_ARG_DEF_FROM_EDGE (phi, e));
	}
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      if (fd->collapse > 1)
	{
	  e = find_edge (cont_bb, l1_bb);
	  remove_edge (e);
	  e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	{
	  e = find_edge (cont_bb, l1_bb);
	  e->flags = EDGE_TRUE_VALUE;
	}
      e->probability = REG_BR_PROB_BASE * 7 / 8;
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
			       recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
			       recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
			       recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
			       recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.
   Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	tt = n % nthreads;
	if (threadid < tt) goto L3; else goto L4;
    L3:
	tt = 0;
	q = q + 1;
    L4:
	s0 = q * threadid + tt;
	e0 = s0 + q;
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/

static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
  basic_block body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;
  edge ep;

  /* Pointer IVs do the bound arithmetic in a same-width integer type.  */
  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS),
		       0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
		       0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);

  /* Compute the total iteration count N (see pseudocode above).  */
  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  q = create_tmp_var (itype, "q");
  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true,
				GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);

  tt = create_tmp_var (itype, "tt");
  t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true,
				GSI_SAME_STMT);
  gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);

  /* The first TT threads get one extra iteration each (pseudocode L3).  */
  t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
  stmt = gimple_build_cond_empty (t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  second_bb = split_block (entry_bb, stmt)->dest;
  gsi = gsi_last_bb (second_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
		     GSI_SAME_STMT);
  stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
				       build_int_cst (itype, 1));
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  third_bb = split_block (second_bb, stmt)->dest;
  gsi = gsi_last_bb (third_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  /* s0/e0 delimit this thread's contiguous slice of the iteration
     space (pseudocode L4).  */
  t = build2 (MULT_EXPR, itype, q, threadid);
  t = build2 (PLUS_EXPR, itype, t, tt);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true,
				 GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true,
				 GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (vmain, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
				true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
  ep->probability = REG_BR_PROB_BASE / 4 * 3;
  ep = find_edge (entry_bb, second_bb);
  ep->flags = EDGE_TRUE_VALUE;
  ep->probability = REG_BR_PROB_BASE / 4;
  find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.
   Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min(s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/

static void
expand_omp_for_static_chunk (struct omp_region *region,
			     struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  /* Pointer IVs do the bound arithmetic in a same-width integer type.  */
  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
	      == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS),
		       0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
		       0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
				true, NULL_TREE, true, GSI_SAME_STMT);

  /* Compute the total iteration count N (see pseudocode above).  */
  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      /* In SSA form the trip count flows through three SSA names:
	 init (entry), main (loop header phi) and back (latch).  */
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Extra definition of V so it is defined even if the loop body is
     never entered (see comment in the pseudocode above).  */
  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				      true, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (v_main, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
	 remove arguments of the phi nodes in fin_bb.  We need to create
	 appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
	   gsi_next (&psi), ++i)
	{
	  gimple nphi;
	  source_location locus;

	  phi = gsi_stmt (psi);
	  t = gimple_phi_result (phi);
	  gcc_assert (t == redirect_edge_var_map_result (vm));
	  nphi = create_phi_node (t, iter_part_bb);
	  SSA_NAME_DEF_STMT (t) = nphi;

	  t = PHI_ARG_DEF_FROM_EDGE (phi, se);
	  locus = gimple_phi_arg_location_from_edge (phi, se);

	  /* A special case -- fd->loop.v is not yet computed in
	     iter_part_bb, we need to use v_extra instead.  */
	  if (t == fd->loop.v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene, locus);
	  locus = redirect_edge_var_map_location (vm);
	  add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
	}
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
      redirect_edge_var_map_clear (re);
      /* The phis in fin_bb are now dead; remove them all.  */
      while (1)
	{
	  psi = gsi_start_phis (fin_bb);
	  if (gsi_end_p (psi))
	    break;
	  remove_phi_node (&psi, false);
	}

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
		   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
		   UNKNOWN_LOCATION);
    }

  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
			   recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
			   recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
}


/* Expand the OpenMP loop defined by REGION.
*/

/* Expand the OpenMP loop defined by REGION.  Chooses between the
   specialized static-schedule expanders and the generic libgomp-driven
   expansion, then updates virtual SSA form.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  /* One omp_for_data_loop entry per collapsed loop level; stack-allocated
     since the count is only known from the GIMPLE_OMP_FOR statement.  */
  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  /* The entry (and continue) blocks end in conditional-looking OMP
     statements; clear the abnormal flag so they become real edges.  */
  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      /* Simple static schedules are expanded inline, without calling
	 into libgomp's iteration dispatcher.  */
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      if (fd.chunk_size == NULL
	  && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
	fd.chunk_size = integer_zero_node;
      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      /* Compute the GOMP_loop_*_start/next builtin pair by offsetting
	 from the STATIC variants: schedule kind selects the base,
	 'ordered' selects the second bank of four, and long long
	 iteration types select the ULL family.  */
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
		 ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		       - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
	  next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
	}
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}

/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */

static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (region->exit != NULL);
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;

  /* Find the block holding the L2 label (the exit of the switch).  */
  if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
    l2 = gimple_block_label (l2_bb);
  else
    {
      /* This can happen if there are reductions.  */
      len = EDGE_COUNT (l0_bb->succs);
      gcc_assert (len > 0);
      e = EDGE_SUCC (l0_bb, len - 1);
      si = gsi_last_bb (e->dest);
      l2 = NULL_TREE;
      if (gsi_end_p (si)
	  || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	l2 = gimple_block_label (e->dest);
      else
	/* Otherwise search the successors for the first non-section
	   block and use its label.  */
	FOR_EACH_EDGE (e, ei, l0_bb->succs)
	  {
	    si = gsi_last_bb (e->dest);
	    if (gsi_end_p (si)
		|| gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	      {
		l2 = gimple_block_label (e->dest);
		break;
	      }
	  }
    }
  if (exit_reachable)
    default_bb = create_empty_bb (l1_bb->prev_bb);
  else
    default_bb = create_empty_bb (l0_bb);

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
			 exit_reachable ? len - 1 : len);
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  /* Case 0: no more sections — jump to L2.  */
  t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
  VEC_quick_push (tree, label_vec, t);
  i = 1;

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      VEC_quick_push (tree, label_vec, u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);

  stmt = gimple_build_switch_vec (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  VEC_free (tree, heap, label_vec);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      tree bfn_decl;

      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
      stmt = gimple_build_call (bfn_decl, 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
    }

  /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
  si = gsi_last_bb (l2_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (si)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}

/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}

/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry and
   exit markers for REGION.
*/

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  /* Delete the entry marker; the body itself is already lowered.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  /* And the matching exit marker, when the region has one.  */
  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile load.  */

static bool
expand_omp_atomic_load (basic_block load_bb, tree addr,
			tree loaded_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb;
  location_t loc;
  gimple stmt;
  tree decl, call, type, itype;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_load_optab[mode], and mode
     is smaller than word size, then expand_atomic_load assumes that the load
     is atomic.  We could avoid the builtin entirely in this case.  */
  tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (loaded_val);
  itype = TREE_TYPE (TREE_TYPE (decl));

  /* loaded_val = __atomic_load_N (addr, MEMMODEL_RELAXED);  */
  call = build_call_expr_loc (loc, decl, 2, addr,
			      build_int_cst (NULL, MEMMODEL_RELAXED));
  if (!useless_type_conversion_p (type, itype))
    call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
  call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the now-redundant GIMPLE_OMP_ATOMIC_STORE marker too.  */
  store_bb = single_succ (load_bb);
  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a normal volatile store.  */

static bool
expand_omp_atomic_store (basic_block load_bb, tree addr,
			 tree loaded_val, tree stored_val, int index)
{
  enum built_in_function tmpbase;
  gimple_stmt_iterator gsi;
  basic_block store_bb = single_succ (load_bb);
  location_t loc;
  gimple stmt;
  tree decl, call, type, itype;
  enum machine_mode imode;
  bool exchange;

  gsi = gsi_last_bb (load_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);

  /* If the load value is needed, then this isn't a store but an exchange.  */
  exchange = gimple_omp_atomic_need_value_p (stmt);

  gsi = gsi_last_bb (store_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
  loc = gimple_location (stmt);

  /* ??? If the target does not implement atomic_store_optab[mode], and mode
     is smaller than word size, then expand_atomic_store assumes that the store
     is atomic.  We could avoid the builtin entirely in this case.  */
  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;

  type = TREE_TYPE (stored_val);

  /* Dig out the type of the function's second argument.  */
  itype = TREE_TYPE (decl);
  itype = TYPE_ARG_TYPES (itype);
  itype = TREE_CHAIN (itype);
  itype = TREE_VALUE (itype);
  imode = TYPE_MODE (itype);

  if (exchange && !can_atomic_exchange_p (imode, true))
    return false;

  if (!useless_type_conversion_p (itype, type))
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
  /* __atomic_store_N / __atomic_exchange_N (addr, val, MEMMODEL_RELAXED).  */
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
			      build_int_cst (NULL, MEMMODEL_RELAXED));
  if (exchange)
    {
      if (!useless_type_conversion_p (type, itype))
	call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
    }

  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
  gsi = gsi_last_bb (load_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.
*/

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
			    tree addr, tree loaded_val,
			    tree stored_val, int index)
{
  enum built_in_function oldbase, newbase, tmpbase;
  tree decl, itype, call;
  tree lhs, rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;
  enum tree_code code;
  bool need_old, need_new;
  enum machine_mode imode;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
  gcc_checking_assert (!need_old || !need_new);

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  code = gimple_assign_rhs_code (stmt);
  switch (code)
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
      newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
      break;
    case MINUS_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
      newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
      break;
    case BIT_AND_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
      newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
      break;
    case BIT_IOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
      newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
      break;
    case BIT_XOR_EXPR:
      oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
      newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
      break;
    default:
      return false;
    }

  /* Make sure the expression is of the proper form.  One operand of the
     assignment must be the loaded value; RHS is the other operand.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  tmpbase = ((enum built_in_function)
	     ((need_new ? newbase : oldbase) + index + 1));
  decl = builtin_decl_explicit (tmpbase);
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));
  imode = TYPE_MODE (itype);

  /* We could test all of the various optabs involved, but the fact of the
     matter is that (with the exception of i486 vs i586 and xadd) all targets
     that support any atomic operaton optab also implements compare-and-swap.
     Let optabs.c take care of expanding any compare-and-swap loop.  */
  if (!can_compare_and_swap_p (imode, true))
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* OpenMP does not imply any barrier-like semantics on its atomic ops.
     It only requires that the operation happen atomically.  Thus we can
     use the RELAXED memory model.  */
  call = build_call_expr_loc (loc, decl, 3, addr,
			      fold_convert_loc (loc, itype, rhs),
			      build_int_cst (NULL, MEMMODEL_RELAXED));

  if (need_old || need_new)
    {
      lhs = need_old ? loaded_val : stored_val;
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
    }
  else
    call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* First remove the GIMPLE_OMP_ATOMIC_STORE marker, then the fetch-op
     assignment that precedes it (its effect is now performed by the
     builtin call inserted above).  */
  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}

/* A subroutine of expand_omp_atomic.
Implement the atomic operation as: oldval = *addr; repeat: newval = rhs; // with oldval replacing *addr in rhs oldval = __sync_val_compare_and_swap (addr, oldval, newval); if (oldval != newval) goto repeat; INDEX is log2 of the size of the data type, and thus usable to find the index of the builtin decl. */ static bool expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb, tree addr, tree loaded_val, tree stored_val, int index) { tree loadedi, storedi, initial, new_storedi, old_vali; tree type, itype, cmpxchg, iaddr; gimple_stmt_iterator si; basic_block loop_header = single_succ (load_bb); gimple phi, stmt; edge e; enum built_in_function fncode; /* ??? We need a non-pointer interface to __atomic_compare_exchange in order to use the RELAXED memory model effectively. */ fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N + index + 1); cmpxchg = builtin_decl_explicit (fncode); if (cmpxchg == NULL_TREE) return false; type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr))); itype = TREE_TYPE (TREE_TYPE (cmpxchg)); if (!can_compare_and_swap_p (TYPE_MODE (itype), true)) return false; /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */ si = gsi_last_bb (load_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD); /* For floating-point values, we'll need to view-convert them to integers so that we can perform the atomic compare and swap. Simplify the following code by always setting up the "i"ntegral variables. 
*/ if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type)) { tree iaddr_val; iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode, true), NULL); iaddr_val = force_gimple_operand_gsi (&si, fold_convert (TREE_TYPE (iaddr), addr), false, NULL_TREE, true, GSI_SAME_STMT); stmt = gimple_build_assign (iaddr, iaddr_val); gsi_insert_before (&si, stmt, GSI_SAME_STMT); loadedi = create_tmp_var (itype, NULL); if (gimple_in_ssa_p (cfun)) { add_referenced_var (iaddr); add_referenced_var (loadedi); loadedi = make_ssa_name (loadedi, NULL); } } else { iaddr = addr; loadedi = loaded_val; } initial = force_gimple_operand_gsi (&si, build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr, build_int_cst (TREE_TYPE (iaddr), 0)), true, NULL_TREE, true, GSI_SAME_STMT); /* Move the value to the LOADEDI temporary. */ if (gimple_in_ssa_p (cfun)) { gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header))); phi = create_phi_node (loadedi, loop_header); SSA_NAME_DEF_STMT (loadedi) = phi; SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)), initial); } else gsi_insert_before (&si, gimple_build_assign (loadedi, initial), GSI_SAME_STMT); if (loadedi != loaded_val) { gimple_stmt_iterator gsi2; tree x; x = build1 (VIEW_CONVERT_EXPR, type, loadedi); gsi2 = gsi_start_bb (loop_header); if (gimple_in_ssa_p (cfun)) { gimple stmt; x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE, true, GSI_SAME_STMT); stmt = gimple_build_assign (loaded_val, x); gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT); } else { x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x); force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE, true, GSI_SAME_STMT); } } gsi_remove (&si, true); si = gsi_last_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE); if (iaddr == addr) storedi = stored_val; else storedi = force_gimple_operand_gsi (&si, build1 (VIEW_CONVERT_EXPR, itype, stored_val), true, NULL_TREE, true, GSI_SAME_STMT); /* Build the compare&swap statement. 
*/ new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi); new_storedi = force_gimple_operand_gsi (&si, fold_convert (TREE_TYPE (loadedi), new_storedi), true, NULL_TREE, true, GSI_SAME_STMT); if (gimple_in_ssa_p (cfun)) old_vali = loadedi; else { old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL); if (gimple_in_ssa_p (cfun)) add_referenced_var (old_vali); stmt = gimple_build_assign (old_vali, loadedi); gsi_insert_before (&si, stmt, GSI_SAME_STMT); stmt = gimple_build_assign (loadedi, new_storedi); gsi_insert_before (&si, stmt, GSI_SAME_STMT); } /* Note that we always perform the comparison as an integer, even for floating point. This allows the atomic operation to properly succeed even with NaNs and -0.0. */ stmt = gimple_build_cond_empty (build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali)); gsi_insert_before (&si, stmt, GSI_SAME_STMT); /* Update cfg. */ e = single_succ_edge (store_bb); e->flags &= ~EDGE_FALLTHRU; e->flags |= EDGE_FALSE_VALUE; e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE); /* Copy the new value to loadedi (we already did that before the condition if we are not in SSA). */ if (gimple_in_ssa_p (cfun)) { phi = gimple_seq_first_stmt (phi_nodes (loop_header)); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi); } /* Remove GIMPLE_OMP_ATOMIC_STORE. */ gsi_remove (&si, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* A subroutine of expand_omp_atomic. Implement the atomic operation as: GOMP_atomic_start (); *addr = rhs; GOMP_atomic_end (); The result is not globally atomic, but works so long as all parallel references are within #pragma omp atomic directives. According to responses received from omp@openmp.org, appears to be within spec. Which makes sense, since that's how several other compilers handle this situation as well. LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're expanding. STORED_VAL is the operand of the matching GIMPLE_OMP_ATOMIC_STORE. 
   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val)  with
   *addr = stored_val;
*/

static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  /* Replace the atomic-load marker with GOMP_atomic_start () followed by
     a plain load.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Replace the atomic-store marker with a plain store followed by
     GOMP_atomic_end ().  */
  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}

/* Expand an GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* Atomic load.  LOADED_VAL == STORED_VAL means the region's
	     body is a pure read of the location.  */
	  if (loaded_val == stored_val
	      && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
		  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
	    return;

	  /* Atomic store: the store block consists of nothing but the
	     GIMPLE_OMP_ATOMIC_STORE itself.  */
	  if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	       || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
	      && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
	      && store_bb == single_succ (load_bb)
	      && first_stmt (store_bb) == store
	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
					  stored_val, index))
	    return;

	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb)
	      && expand_omp_atomic_fetch_op (load_bb, addr,
					     loaded_val, stored_val, index))
	    return;

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}

/* Expand the parallel region tree rooted at REGION.
   Expansion proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to be
   created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      /* Depth-first: expand children before this region.  */
      if (region->inner)
	expand_omp (region->inner);

      /* Point diagnostics at the region's entry statement while
	 expanding it.  */
      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}

/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analoguous to GIMPLE_OMP_RETURN, but
	     matches with GIMPLE_OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}

      else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	  ;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  if (single_tree && !parent)
    return;

  /* Recurse over the dominator children so nested regions are seen in
     dominance (hence nesting) order.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}

/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.
*/

void
omp_expand_local (basic_block head)
{
  /* Build a single region tree rooted at HEAD, expand it, and release
     the (file-global) region state.  */
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}

/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  /* Nothing to do if the function contains no OMP constructs.  */
  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}

struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.
*/

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body, body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx);

  /* Count the GIMPLE_OMP_SECTION statements in the body.  */
  tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;

  /* Lower each section in turn; each contributes its start marker, its
     lowered body, and a region-exit (OMP return) marker.  */
  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      gimple_seq_add_stmt (&body, sec_start);

      lower_omp (gimple_omp_body (sec_start), sctx);
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
      gimple_omp_set_body (sec_start, NULL);

      if (i == len - 1)
	{
	  /* Lastprivate copy-out is attached to the final section.  */
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
	  gimple_seq_add_seq (&body, l);
	  gimple_omp_section_set_last (sec_start);
	}

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  /* Sequence: input clauses, the sections directive, the dispatch
     switch, the bind of lowered sections, the continue marker, then the
     reduction and destructor code, and finally the region exit.  */
  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  /* The exit barrier is elided iff a 'nowait' clause is present.  */
  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
  gimple_omp_set_body (stmt, NULL);

  gsi_replace (gsi_p, new_stmt, true);
}

/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

     	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  /* lhs = GOMP_single_start ();  only the winning thread gets true.  */
  decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}

/* A subroutine of lower_omp_single.
Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p,
		       omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, bfn_decl;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  /* sender_decl is the 'copyout' record filled by the executing thread;
     receiver_decl is the pointer the other threads read through.  */
  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
  t = build_call_expr_loc (loc, bfn_decl, 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  /* NULL return means this thread executes the body (branch to L0);
     otherwise copy the values in via copyin_seq (branch to L1).  */
  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			      &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}

/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  bind_body = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  /* A non-NULL record_type means a copyprivate clause was seen during
     scanning, which requires the copy-out expansion.  */
  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, bind_body, block);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}

/* Expand code for an OpenMP master directive.
*/

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x, bfn_decl;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
      				 block);

  /* Guard the body with:  if (omp_get_thread_num () != 0) goto LAB;
     i.e. only the master thread (thread 0) executes it.  */
  bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  x = build_call_expr_loc (loc, bfn_decl, 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
}

/* Expand code for an OpenMP ordered directive.
*/

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
      				   block);

  /* Bracket the lowered body with GOMP_ordered_start/end calls.  */
  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
			 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}

/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.
*/

/* Cache of per-name mutex decls for named critical sections; a GC root
   so the decls survive across functions in the translation unit.  */
static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  /* Create a common (link-once) public mutex variable named
	     ".gomp_critical_user_<NAME>" so all TUs share one lock per
	     critical-section name.  */
	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
      unlock = build_call_expr_loc (loc, unlock, 1,
				build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      /* Anonymous critical section: use the single global lock.  */
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}

/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->loop.n2, 0)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}

/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
  lower_omp (gimple_omp_body (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

     	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
  gsi_replace (gsi_p, new_stmt, true);
}

/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_PARALLEL.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
    			 bool *handled_ops_p,
    			 struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      /* First workshare seen -> 1; any further statement -> -1 (not a
	 combined parallel).  */
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.
*/
  copy_body_data cb;
  omp_context *ctx;
};

/* copy_decl callback for the task-copyfn remapper: give fields that
   live in the sender record fresh temporaries, leave others alone.  */

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);

  return var;
}

/* Build a copy of record ORIG_TYPE with all variably-modified field
   types remapped into the task copy function's context.  */

static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}

/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = omp_build_component_ref (src, sf);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = omp_build_component_ref (dst, f);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = omp_build_component_ref (ptr, df);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}

/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.
*/

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      /* Detect "#pragma omp parallel" containing exactly one workshare
	 construct, so it can be expanded as a combined parallel.  */
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      /* Sender-side setup/teardown must wrap the directive itself.  */
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}

/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
    			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL
      && data == NULL
      && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

/* Lower the single statement at *GSI_P inside OMP context CTX,
   dispatching on the statement code.  */

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
	      		 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_TRANSACTION:
      lower_omp (gimple_transaction_body (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

/* Lower every statement of BODY in OMP context CTX.  */

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi = gsi_start (body);
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.
*/

/* Report a structured-block violation at *GSI_P when the branch's OMP
   context BRANCH_CTX differs from the destination label's LABEL_CTX.
   The offending statement is replaced by a nop so later passes do not
   trip over it.  Returns true iff an error was emitted.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing and error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.
*/

/* walk_gimple_seq callback for pass 1: wi->info carries the innermost
   enclosing OMP construct; each label found is recorded in all_labels
   together with that construct.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
    	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

 switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
    	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
	{
	  /* Check both arms of the conditional against the label map.  */
	  tree lab = gimple_cond_true_label (stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple) n->value : NULL);
	    }
	  lab = gimple_cond_false_label (stmt);
	  if (lab)
	    {
	      n = splay_tree_lookup (all_labels,
				     (splay_tree_key) lab);
	      diagnose_sb_0 (gsi_p, context,
			     n ? (gimple) n->value : NULL);
	    }
	}
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	/* Computed gotos have non-LABEL_DECL destinations; skip them.  */
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    /* Stop at the first violating case label.  */
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      /* A return inside any OMP construct (context != NULL) is invalid.  */
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Run both diagnostic walks over the current function body and free the
   label map afterwards.  */

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

/* Only run the diagnostics when OpenMP is enabled.  */

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};

#include "gt-omp-low.h"
GB_unop__abs_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__abs_fp64_fp64)
// op(A') function:  GB (_unop_tran__abs_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = fabs (aij)

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = fabs (z) ;            \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__abs_fp64_fp64)
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: apply fabs to every entry in parallel
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = fabs (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = fabs (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__abs_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included; it uses the
    // GB_CAST_OP macro defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
HybridAdoptorBase.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//////////////////////////////////////////////////////////////////////////////////////


/** @file HybridAdoptorBase.h
 *
 * Hybrid adoptor base class
 */
#ifndef QMCPLUSPLUS_HYBRID_ADOPTOR_BASE_H
#define QMCPLUSPLUS_HYBRID_ADOPTOR_BASE_H

#include <Particle/DistanceTableData.h>
#include <QMCWaveFunctions/lcao/SoaSphericalTensor.h>
#include <spline2/MultiBspline.hpp>

namespace qmcplusplus
{

/** Radial-spline x spherical-harmonic expansion of the orbitals around one
 * atomic center.  Bands are stored padded to Npad; for each (l,m) channel the
 * radial values occupy a contiguous stride of Npad entries in localV/G/L.
 */
template<typename ST> struct AtomicOrbitalSoA
{
  static const int D=3;
  using AtomicSplineType=typename bspline_traits<ST,1>::SplineType;
  using AtomicBCType=typename bspline_traits<ST,1>::BCType;
  using AtomicSingleSplineType=UBspline_1d_d;
  using PointType=TinyVector<ST,D>;
  using value_type=ST;

  using vContainer_type=aligned_vector<ST>;

  // near core cutoff
  ST rmin;
  // far from core cutoff, rmin_sqrt>=rmin
  ST rmin_sqrt;
  ST cutoff, cutoff_buffer, spline_radius, non_overlapping_radius;
  int spline_npoints, BaseN;
  int NumBands, Npad;
  PointType pos;
  const int lmax, lm_tot;
  SoaSphericalTensor<ST> Ylm;
  // l_vals[lm] holds l as an ST for each packed (l,m) index
  vContainer_type l_vals;
  // scratch: r^{-l} per (l,m) channel, refreshed in evaluate_vgl
  vContainer_type r_power_minus_l;
  AtomicSplineType* MultiSpline;
  MultiBspline1D<ST>* SplineInst;

  vContainer_type localV, localG, localL;

  AtomicOrbitalSoA(int Lmax):
    Ylm(Lmax), MultiSpline(nullptr), SplineInst(nullptr), lmax(Lmax),
    lm_tot((Lmax+1)*(Lmax+1))
  {
    r_power_minus_l.resize(lm_tot);
    l_vals.resize(lm_tot);
    for(int l=0; l<=lmax; l++)
      for(int m=-l; m<=l; m++)
        l_vals[l*(l+1)+m] = l;
    // smallest radius at which r^{-lmax} stays representable
    rmin = std::exp(std::log(std::numeric_limits<ST>::min())/std::max(Lmax,1));
    rmin = std::max(rmin,std::numeric_limits<ST>::epsilon());
    rmin_sqrt=std::max(rmin,std::sqrt(std::numeric_limits<ST>::epsilon()));
  }

  ~AtomicOrbitalSoA()
  {
    // SplineInst owns spline_m that MultiSpline points into
    if(MultiSpline != nullptr) delete SplineInst;
  }

  // resize band storage (padded to alignment) and build the radial spline
  inline void resizeStorage(size_t Nb)
  {
    NumBands=Nb;
    Npad=getAlignedSize<ST>(Nb);
    localV.resize(Npad*lm_tot);
    localG.resize(Npad*lm_tot);
    localL.resize(Npad*lm_tot);
    create_spline();
    qmc_common.memory_allocated += SplineInst->sizeInByte();
  }

  void bcast_tables(Communicate* comm)
  {
    chunked_bcast(comm, MultiSpline);
  }

  void gather_tables(Communicate* comm, std::vector<int> &offset_cplx, std::vector<int> &offset_real)
  {
    if(offset_cplx.size())
      gatherv(comm, MultiSpline, Npad, offset_cplx);
    if(offset_real.size())
      gatherv(comm, MultiSpline, Npad, offset_real);
  }

  // record center position, cutoffs and spline grid parameters
  template<typename PT, typename VT>
  inline void set_info(const PT& R, const VT& cutoff_in,
                       const VT& cutoff_buffer_in, const VT& spline_radius_in,
                       const VT& non_overlapping_radius_in, const int& spline_npoints_in)
  {
    pos[0]=R[0];
    pos[1]=R[1];
    pos[2]=R[2];
    cutoff=cutoff_in;
    cutoff_buffer=cutoff_buffer_in;
    spline_radius=spline_radius_in;
    spline_npoints=spline_npoints_in;
    non_overlapping_radius=non_overlapping_radius_in;
    BaseN=spline_npoints+2;
  }

  inline void create_spline()
  {
    AtomicBCType bc;
    bc.lCode = FLAT;
    bc.rCode = NATURAL;
    Ugrid grid;
    grid.start = 0.0;
    grid.end   = spline_radius;
    grid.num   = spline_npoints;
    SplineInst = new MultiBspline1D<ST>();
    SplineInst->create(grid, bc, lm_tot*Npad);
    MultiSpline=&(SplineInst->spline_m);
  }

  inline void flush_zero()
  {
    SplineInst->flush_zero();
  }

  inline void set_spline(AtomicSingleSplineType* spline, int lm, int ispline)
  {
    SplineInst->copy_spline(spline, lm*Npad+ispline, 0, BaseN);
  }

  // load the radial table from HDF5; fails if grid metadata mismatches
  bool read_splines(hdf_archive& h5f)
  {
    einspline_engine<AtomicSplineType> bigtable(MultiSpline);
    int lmax_in, spline_npoints_in;
    ST spline_radius_in;
    bool success=true;
    success = success && h5f.read(lmax_in, "l_max");
    success = success && h5f.read(spline_radius_in, "spline_radius");
    success = success && h5f.read(spline_npoints_in, "spline_npoints");
    if(lmax_in!=lmax) return false;
    if(spline_radius_in!=spline_radius) return false;
    if(spline_npoints_in!=spline_npoints) return false;
    return success && h5f.read(bigtable,"radial_spline");
  }

  bool write_splines(hdf_archive& h5f)
  {
    bool success=true;
    success = success && h5f.write(spline_radius, "spline_radius");
    success = success && h5f.write(spline_npoints, "spline_npoints");
    success = success && h5f.write(lmax, "l_max");
    success = success && h5f.write(pos, "position");
    einspline_engine<AtomicSplineType> bigtable(MultiSpline);
    success = success && h5f.write(bigtable,"radial_spline");
    return success;
  }

  //evaluate only V
  // myV[ib] = sum_lm Ylm(rhat) * radial_lm(r)[ib]
  template<typename VV>
  inline void evaluate_v(const ST& r, const PointType& dr, VV& myV)
  {
    if (r>std::numeric_limits<ST>::epsilon())
      Ylm.evaluateV(dr[0]/r, dr[1]/r, dr[2]/r);
    else
      Ylm.evaluateV(0,0,1);   // direction undefined at r=0; use z-axis
    const ST* restrict Ylm_v=Ylm[0];
    CONSTEXPR ST czero(0);
    ST* restrict val=myV.data();
    ST* restrict local_val=localV.data();
    std::fill(myV.begin(),myV.end(),czero);
    SplineInst->evaluate(r,localV);
    for(size_t lm=0; lm<lm_tot; lm++)
    {
      #pragma omp simd aligned(val,local_val)
      for(size_t ib=0; ib<myV.size(); ib++)
        val[ib]+=Ylm_v[lm]*local_val[ib];
      local_val+=Npad;   // next (l,m) channel
    }
  }

  // batched values for several displacements sharing one radial distance r
  // (used for NLPP quadrature points around one center)
  template<typename DISPL, typename VM>
  inline void evaluateValues(const DISPL& Displacements, const int center_idx, const ST& r, VM& multi_myV)
  {
    if(r<=std::numeric_limits<ST>::epsilon())
      Ylm.evaluateV(0,0,1);
    const ST* restrict Ylm_v=Ylm[0];
    const size_t m=multi_myV.cols();
    CONSTEXPR ST czero(0);
    std::fill(multi_myV.begin(),multi_myV.end(),czero);
    SplineInst->evaluate(r,localV);   // radial part shared by all points
    for(int ivp=0; ivp<Displacements.size(); ivp++)
    {
      PointType dr=Displacements[ivp][center_idx];
      if(r>std::numeric_limits<ST>::epsilon())
        Ylm.evaluateV(-dr[0]/r, -dr[1]/r, -dr[2]/r);
      ST* restrict val=multi_myV[ivp];
      ST* restrict local_val=localV.data();
      for(size_t lm=0; lm<lm_tot; lm++)
      {
        #pragma omp simd aligned(val,local_val)
        for(size_t ib=0; ib<m; ib++)
          val[ib]+=Ylm_v[lm]*local_val[ib];
        local_val+=Npad;
      }
    }
  }

  //evaluate VGL
  // value, gradient and laplacian; three regimes by distance from the core
  // (far: full formula; near: divergence-killed laplacian; on-top: r=0 limit)
  template<typename VV, typename GV>
  inline void evaluate_vgl(const ST& r, const PointType& dr, VV& myV, GV& myG, VV& myL)
  {
    ST drx, dry, drz, rhatx, rhaty, rhatz, rinv;
    if (r>rmin)
    {
      rinv=1.0/r;
      drx=dr[0];
      dry=dr[1];
      drz=dr[2];
      rhatx=drx*rinv;
      rhaty=dry*rinv;
      rhatz=drz*rinv;
    }
    else
    {
      // r<=rmin: rinv forced to 0; rhat* left unset but the r<=rmin branch
      // below does not read them
      rinv=0;
      drx=dr[0];
      dry=dr[1];
      drz=dr[2];
    }

    Ylm.evaluateVGL(drx, dry, drz);
    const ST* restrict Ylm_v=Ylm[0];
    const ST* restrict Ylm_gx=Ylm[1];
    const ST* restrict Ylm_gy=Ylm[2];
    const ST* restrict Ylm_gz=Ylm[3];

    ST* restrict g0=myG.data(0);
    ST* restrict g1=myG.data(1);
    ST* restrict g2=myG.data(2);
    CONSTEXPR ST czero(0), cone(1), chalf(0.5);
    std::fill(myV.begin(),myV.end(),czero);
    std::fill(g0,g0+Npad,czero);
    std::fill(g1,g1+Npad,czero);
    std::fill(g2,g2+Npad,czero);
    std::fill(myL.begin(),myL.end(),czero);
    ST* restrict val=myV.data();
    ST* restrict lapl=myL.data();
    ST* restrict local_val=localV.data();
    ST* restrict local_grad=localG.data();
    ST* restrict local_lapl=localL.data();
    SplineInst->evaluate_vgl(r,localV,localG,localL);

    if(r>rmin_sqrt)
    {
      // far from core
      // precompute r^{-l} for each (l,m) channel
      r_power_minus_l[0]=cone;
      ST r_power_temp=cone;
      for(int l=1; l<=lmax; l++)
      {
        r_power_temp*=rinv;
        for(int m=-l, lm=l*l; m<=l; m++,lm++)
          r_power_minus_l[lm]=r_power_temp;
      }
      for(size_t lm=0; lm<lm_tot; lm++)
      {
        const ST& l_val=l_vals[lm];
        const ST& r_power=r_power_minus_l[lm];
        const ST Ylm_rescale=Ylm_v[lm]*r_power;
        const ST rhat_dot_G = ( rhatx*Ylm_gx[lm] + rhaty*Ylm_gy[lm] + rhatz*Ylm_gz[lm] ) * r_power;
        #pragma omp simd aligned(val,g0,g1,g2,lapl,local_val,local_grad,local_lapl)
        for(size_t ib=0; ib<myV.size(); ib++)
        {
          const ST local_v=local_val[ib];
          const ST local_g=local_grad[ib];
          const ST local_l=local_lapl[ib];
          // value
          const ST Vpart = l_val*rinv*local_v;
          val[ib] += Ylm_rescale*local_v;

          // grad
          const ST factor1 = local_g*Ylm_rescale;
          const ST factor2 = local_v*r_power;
          const ST factor3 = -Vpart*Ylm_rescale;
          g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx;
          g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty;
          g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz;

          // laplacian
          lapl[ib] += (local_l + ( local_g * ( 2 - l_val ) - Vpart ) * rinv) * Ylm_rescale
                      + (local_g - Vpart ) * rhat_dot_G;
        }
        local_val+=Npad;
        local_grad+=Npad;
        local_lapl+=Npad;
      }
    }
    else if(r>rmin)
    {
      // the possibility of reaching here is very very low
      std::cout << "Warning: an electron is very close to an ion, distance=" << r << " be careful!" << std::endl;
      // near core, kill divergence in the laplacian
      r_power_minus_l[0]=cone;
      ST r_power_temp=cone;
      for(int l=1; l<=lmax; l++)
      {
        r_power_temp*=rinv;
        for(int m=-l, lm=l*l; m<=l; m++,lm++)
          r_power_minus_l[lm]=r_power_temp;
      }
      for(size_t lm=0; lm<lm_tot; lm++)
      {
        const ST& l_val=l_vals[lm];
        const ST& r_power=r_power_minus_l[lm];
        const ST Ylm_rescale=Ylm_v[lm]*r_power;
        const ST rhat_dot_G = (Ylm_gx[lm] * rhatx + Ylm_gy[lm] * rhaty + Ylm_gz[lm] * rhatz ) * r_power * r;
        #pragma omp simd aligned(val,g0,g1,g2,lapl,local_val,local_grad,local_lapl)
        for(size_t ib=0; ib<myV.size(); ib++)
        {
          const ST local_v=local_val[ib];
          const ST local_g=local_grad[ib];
          const ST local_l=local_lapl[ib];
          // value
          const ST Vpart = Ylm_rescale*local_v;
          val[ib] += Vpart;

          // grad
          const ST factor1 = local_g*Ylm_rescale;
          const ST factor2 = local_v*r_power;
          const ST factor3 = -l_val*Vpart*rinv;
          g0[ib] += factor1 * rhatx + factor2 * Ylm_gx[lm] + factor3 * rhatx;
          g1[ib] += factor1 * rhaty + factor2 * Ylm_gy[lm] + factor3 * rhaty;
          g2[ib] += factor1 * rhatz + factor2 * Ylm_gz[lm] + factor3 * rhatz;

          // laplacian
          lapl[ib] += local_l * (cone - chalf *l_val) * ( 3 * Ylm_rescale + rhat_dot_G );
        }
        local_val+=Npad;
        local_grad+=Npad;
        local_lapl+=Npad;
      }
    }
    else
    {
      std::cout << "Warning: an electron is on top of an ion!" << std::endl;
      // strictly zero: only the l=0 channel contributes to V and L,
      // and only the l=1 channels (lm = 1..3) contribute to the gradient
      #pragma omp simd aligned(val,lapl,local_val,local_lapl)
      for(size_t ib=0; ib<myV.size(); ib++)
      {
        // value
        val[ib] = Ylm_v[0]*local_val[ib];

        // laplacian
        lapl[ib] = local_lapl[ib] * static_cast<ST>(3) * Ylm_v[0];
      }
      local_val+=Npad;
      local_grad+=Npad;
      local_lapl+=Npad;
      if(lm_tot>0)
      {
        //std::cout << std::endl;
        for(size_t lm=1; lm<4; lm++)
        {
          #pragma omp simd aligned(g0,g1,g2,local_grad)
          for(size_t ib=0; ib<myV.size(); ib++)
          {
            const ST local_g=local_grad[ib];
            // grad
            g0[ib] += local_g * Ylm_gx[lm];
            g1[ib] += local_g * Ylm_gy[lm];
            g2[ib] += local_g * Ylm_gz[lm];
          }
          local_grad+=Npad;
        }
      }
    }
  }

  template<typename VV, typename GV, typename HT>
  void evaluate_vgh(const ST& r, const PointType& dr, VV& myV, GV& myG, HT& myH)
  {
    //Needed to do tensor product here
    APP_ABORT("AtomicOrbitalSoA::evaluate_vgh");
  }
};

/** adoptor class to match
 *
 */
/** Mixin that adds per-atomic-center evaluation to a spline adoptor: picks
 * the nearest center via the electron-ion distance table and, inside its
 * cutoff, evaluates the atomic expansion blended by smooth_function.
 * A return of RealType(-1) from the evaluate_* helpers means "outside every
 * cutoff; fall back to the regular 3D spline".
 */
template<typename ST> struct HybridAdoptorBase
{
  static const int D=3;
  using PointType=typename AtomicOrbitalSoA<ST>::PointType;
  using RealType=typename DistanceTableData::RealType;

  // atomic centers
  std::vector<AtomicOrbitalSoA<ST> > AtomicCenters;
  ///table index
  int myTableID;
  //mapping supercell to primitive cell
  std::vector<int> Super2Prim;
  // r, dr for distance table
  RealType dist_r;
  DistanceTableData::PosType dist_dr;
  // for APBC
  PointType r_image;
  // smooth function derivatives
  RealType df_dr, d2f_dr2;

  HybridAdoptorBase() { }

  void set_info(const ParticleSet& ions, ParticleSet& els, const std::vector<int>& mapping)
  {
    myTableID=els.addTable(ions,DT_SOA);
    Super2Prim=mapping;
  }

  inline void resizeStorage(size_t Nb)
  {
    for(int ic=0; ic<AtomicCenters.size(); ic++)
      AtomicCenters[ic].resizeStorage(Nb);
  }

  void bcast_tables(Communicate* comm)
  {
    for(int ic=0; ic<AtomicCenters.size(); ic++)
      AtomicCenters[ic].bcast_tables(comm);
  }

  void gather_atomic_tables(Communicate* comm, std::vector<int> &offset_cplx, std::vector<int> &offset_real)
  {
    for(int ic=0; ic<AtomicCenters.size(); ic++)
      AtomicCenters[ic].gather_tables(comm, offset_cplx, offset_real);
  }

  inline void flush_zero()
  {
    for(int ic=0; ic<AtomicCenters.size(); ic++)
      AtomicCenters[ic].flush_zero();
  }

  // read all per-center radial tables from the "atomic_centers" HDF5 group
  bool read_splines(hdf_archive& h5f)
  {
    bool success=true;
    size_t ncenter;
    success = success && h5f.push("atomic_centers",false);
    success = success && h5f.read(ncenter,"number_of_centers");
    if(!success) return success;
    if(ncenter!=AtomicCenters.size()) success=false;
    // read splines of each center
    for(int ic=0; ic<AtomicCenters.size(); ic++)
    {
      std::ostringstream gname;
      gname << "center_" << ic;
      success = success && h5f.push(gname.str().c_str(),false);
      success = success && AtomicCenters[ic].read_splines(h5f);
      h5f.pop();
    }
    h5f.pop();
    return success;
  }

  bool write_splines(hdf_archive& h5f)
  {
    bool success=true;
    int ncenter=AtomicCenters.size();
    success = success && h5f.push("atomic_centers",true);
    success = success && h5f.write(ncenter,"number_of_centers");
    // write splines of each center
    for(int ic=0; ic<AtomicCenters.size(); ic++)
    {
      std::ostringstream gname;
      gname << "center_" << ic;
      success = success && h5f.push(gname.str().c_str(),true);
      success = success && AtomicCenters[ic].write_splines(h5f);
      h5f.pop();
    }
    h5f.pop();
    return success;
  }

  // boundary-condition sign for antiperiodic BC, from the lattice image
  // offset between r and the stored r_image
  template<typename Cell>
  inline int get_bc_sign(const PointType& r, const Cell& PrimLattice, TinyVector<int,D>& HalfG)
  {
    int bc_sign=0;
    PointType shift_unit = PrimLattice.toUnit(r-r_image);
    for(int i=0; i<D; i++)
    {
      ST img = round(shift_unit[i]);
      bc_sign += HalfG[i] * (int)img;
    }
    return bc_sign;
  }

  //evaluate only V
  template<typename VV>
  inline RealType evaluate_v(const ParticleSet& P, const int iat, VV& myV)
  {
    const auto* ei_dist=P.DistTables[myTableID];
    const int center_idx=ei_dist->get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl==iat);
    if(center_idx<0) abort();
    auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
    if ( dist_r < myCenter.cutoff )
    {
      PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
      r_image=myCenter.pos+dr;
      myCenter.evaluate_v(dist_r, dr, myV);
      return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
    }
    return RealType(-1);
  }

  /* check if the batched algorithm is safe to operate
   * @param VP virtual particle set
   * @return true if it is safe
   *
   * When the reference electron in the NLPP evaluation has a distance larger than the non overlapping radius of the reference center.
   * Some qudrature points may get its SPOs evaluated from the nearest center which is not the reference center.
   * The batched algorthm forces the evaluation on the reference center and introduce some error.
   * In this case, the non-batched algorithm should be used.
   */
  bool is_batched_safe(const VirtualParticleSet& VP)
  {
    const int center_idx=VP.refSourcePtcl;
    auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
    return VP.refPS.DistTables[myTableID]->Distances[VP.refPtcl][center_idx] < myCenter.non_overlapping_radius;
  }

  // C2C, C2R cases
  template<typename VM>
  inline RealType evaluateValuesC2X(const VirtualParticleSet& VP, VM& multi_myV)
  {
    const int center_idx=VP.refSourcePtcl;
    dist_r = VP.refPS.DistTables[myTableID]->Distances[VP.refPtcl][center_idx];
    auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
    if ( dist_r < myCenter.cutoff )
    {
      myCenter.evaluateValues(VP.DistTables[myTableID]->Displacements, center_idx, dist_r, multi_myV);
      return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
    }
    return RealType(-1);
  }

  // R2R case
  template<typename VM, typename Cell, typename SV>
  inline RealType evaluateValuesR2R(const VirtualParticleSet& VP, const Cell& PrimLattice, TinyVector<int,D>& HalfG, VM& multi_myV, SV& bc_signs)
  {
    const int center_idx=VP.refSourcePtcl;
    dist_r = VP.refPS.DistTables[myTableID]->Distances[VP.refPtcl][center_idx];
    auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
    if ( dist_r < myCenter.cutoff )
    {
      const auto &displ=VP.DistTables[myTableID]->Displacements;
      for(int ivp=0; ivp<VP.getTotalNum(); ivp++)
      {
        r_image=myCenter.pos-displ[ivp][center_idx];
        // NOTE(review): stray double semicolon below is a harmless empty
        // statement
        bc_signs[ivp]=get_bc_sign(VP.R[ivp], PrimLattice, HalfG);;
      }
      myCenter.evaluateValues(displ, center_idx, dist_r, multi_myV);
      return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
    }
    return RealType(-1);
  }

  //evaluate only VGL
  template<typename VV, typename GV>
  inline RealType evaluate_vgl(const ParticleSet& P, const int iat, VV& myV, GV& myG, VV& myL)
  {
    const auto* ei_dist=P.DistTables[myTableID];
    const int center_idx=ei_dist->get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl==iat);
    if(center_idx<0) abort();
    auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
    if ( dist_r < myCenter.cutoff )
    {
      PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
      r_image=myCenter.pos+dr;
      myCenter.evaluate_vgl(dist_r, dr, myV, myG, myL);
      return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
    }
    return RealType(-1);
  }

  //evaluate only VGH
  template<typename VV, typename GV, typename HT>
  inline RealType evaluate_vgh(const ParticleSet& P, const int iat, VV& myV, GV& myG, HT& myH)
  {
    const auto* ei_dist=P.DistTables[myTableID];
    const int center_idx=ei_dist->get_first_neighbor(iat, dist_r, dist_dr, P.activePtcl==iat);
    if(center_idx<0) abort();
    auto& myCenter=AtomicCenters[Super2Prim[center_idx]];
    if ( dist_r < myCenter.cutoff )
    {
      PointType dr(-dist_dr[0], -dist_dr[1], -dist_dr[2]);
      r_image=myCenter.pos+dr;
      myCenter.evaluate_vgh(dist_r, dr, myV, myG, myH);
      return smooth_function(myCenter.cutoff_buffer, myCenter.cutoff, dist_r);
    }
    return RealType(-1);
  }

  // tanh-based blending weight: 1 below cutoff_buffer, falling to 0 at
  // cutoff; also stores df_dr and d2f_dr2 as side effects
  inline RealType smooth_function(const ST &cutoff_buffer, const ST &cutoff, RealType r)
  {
    const RealType cone(1), ctwo(2), chalf(0.5);
    if (r<cutoff_buffer) return cone;
    const RealType scale=ctwo/(cutoff-cutoff_buffer);
    const RealType x=(r-cutoff_buffer)*scale-cone;
    const RealType cosh_x=std::cosh(x);
    const RealType tanh_x=std::tanh(x);
    df_dr=-chalf/(cosh_x*cosh_x)*scale;
    d2f_dr2=-ctwo*tanh_x*df_dr*scale;
    return chalf*(cone-tanh_x);
  }
};

}
#endif
residualbased_incrementalupdate_static_scheme.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//

#if !defined(KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H )
#define KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H

/* System includes */

/* External includes */

/* Project includes */
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"

namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{

/**
 * @class ResidualBasedIncrementalUpdateStaticScheme
 * @ingroup KratosCore
 * @brief This class provides the implementation of a static scheme
 * @details The only operation done in this scheme is the update of the database, no predict is done
 * @tparam TSparseSpace The sparse space considered
 * @tparam TDenseSpace The dense space considered
 * @see Scheme
 * @author Riccardo Rossi
 */
template<class TSparseSpace,
         class TDenseSpace //= DenseSpace<double>
         >
class ResidualBasedIncrementalUpdateStaticScheme
    : public Scheme<TSparseSpace,TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ResidualBasedIncrementalUpdateStaticScheme
    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedIncrementalUpdateStaticScheme);

    /// Base class definition
    typedef Scheme<TSparseSpace,TDenseSpace> BaseType;

    /// DoF array type definition
    typedef typename BaseType::DofsArrayType DofsArrayType;

    /// Data type definition
    typedef typename BaseType::TDataType TDataType;

    /// Matrix type definition
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    /// Vector type definition
    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    /// Local system vector type definition (comment fixed: this is the local VECTOR type)
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    /// Local system matrix type definition (comment fixed: this is the local MATRIX type)
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    /// Elements containers definition
    typedef ModelPart::ElementsContainerType ElementsArrayType;

    /// Conditions containers definition
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;

    /// The definition of the vector containing the equation ids
    typedef Element::EquationIdVectorType EquationIdVectorType;

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Constructor. The pseudo static scheme (parameters)
     * @param ThisParameters Dummy parameters
     */
    explicit ResidualBasedIncrementalUpdateStaticScheme(Parameters ThisParameters)
        : BaseType()
    {
        // Validate default parameters (this scheme takes none)
        Parameters default_parameters = Parameters(R"( { })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);
    }

    /** Default constructor.
     */
    explicit ResidualBasedIncrementalUpdateStaticScheme()
        : BaseType()
    {}

    /** Copy Constructor.
     */
    explicit ResidualBasedIncrementalUpdateStaticScheme(ResidualBasedIncrementalUpdateStaticScheme& rOther)
        :BaseType(rOther)
    {
    }

    /** Destructor.
     */
    ~ResidualBasedIncrementalUpdateStaticScheme() override {}

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Performing the update of the solution.
     * @param rModelPart The model part of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void Update(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        // Delegate the DoF update (x += dx) to the space-specific DoF updater
        mpDofUpdater->UpdateDofs(rDofSet, rDx);

        KRATOS_CATCH("")
    }

    /**
     * @brief Performing the prediction of the solution.
     * @param rModelPart The model part of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        // Intentionally empty: a static scheme performs no prediction

        KRATOS_CATCH("")
    }

    /**
     * @brief It initializes a non-linear iteration (for the element)
     * @param rModelPart The model of the problem to solve
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     */
    void InitializeNonLinIteration(
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Definition of the first element iterator
        const auto it_elem_begin = rModelPart.ElementsBegin();
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
            auto it_elem = it_elem_begin + i;
            it_elem->InitializeNonLinearIteration(r_current_process_info);
        }

        // Definition of the first condition iterator
        const auto it_cond_begin = rModelPart.ConditionsBegin();
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
            auto it_cond = it_cond_begin + i;
            it_cond->InitializeNonLinearIteration(r_current_process_info);
        }

        // Definition of the first constraint iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
            auto it_const = it_const_begin + i;
            it_const->InitializeNonLinearIteration(r_current_process_info);
        }

        KRATOS_CATCH( "" );
    }

    /**
     * @brief It initializes a non-linear iteration (for an individual condition)
     * @param rCurrentCondition The condition to compute
     * @param rCurrentProcessInfo The current process info instance
     */
    void InitializeNonLinearIteration(
        Condition::Pointer rCurrentCondition,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        (rCurrentCondition)->InitializeNonLinearIteration(rCurrentProcessInfo);
    }

    /**
     * @brief It initializes a non-linear iteration (for an individual element)
     * @param pCurrentElement The element to compute
     * @param rCurrentProcessInfo The current process info instance
     */
    void InitializeNonLinearIteration(
        Element::Pointer pCurrentElement,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        (pCurrentElement)->InitializeNonLinearIteration(rCurrentProcessInfo);
    }

    /**
     * @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
     * @details It "asks" the matrix needed to the element and performs the operations needed to introduce the selected time integration scheme. This function calculates at the same time the contribution to the LHS and to the RHS of the system
     * @param pCurrentElement The element to compute
     * @param rLHSContribution The LHS matrix contribution
     * @param rRHSContribution The RHS vector contribution
     * @param rEquationId The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void CalculateSystemContributions(
        Element::Pointer pCurrentElement,
        LocalSystemMatrixType& rLHSContribution,
        LocalSystemVectorType& rRHSContribution,
        EquationIdVectorType& rEquationId,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY

        (pCurrentElement)->CalculateLocalSystem(rLHSContribution,rRHSContribution, rCurrentProcessInfo);
        (pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);

        KRATOS_CATCH("")
    }

    /**
     * @brief Functions totally analogous to the precedent but applied to the "condition" objects
     * @param rCurrentCondition The condition to compute
     * @param rLHSContribution The LHS matrix contribution
     * @param rRHSContribution The RHS vector contribution
     * @param rEquationId The ID's of the condition degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void Condition_CalculateSystemContributions(
        Condition::Pointer rCurrentCondition,
        LocalSystemMatrixType& rLHSContribution,
        LocalSystemVectorType& rRHSContribution,
        EquationIdVectorType& rEquationId,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY

        (rCurrentCondition)->CalculateLocalSystem(rLHSContribution, rRHSContribution, rCurrentProcessInfo);
        (rCurrentCondition)->EquationIdVector(rEquationId, rCurrentProcessInfo);

        KRATOS_CATCH("")
    }

    /**
     * @brief This function is designed to calculate just the RHS contribution
     * @param pCurrentElement The element to compute
     * @param rRHSContribution The RHS vector contribution
     * @param rEquationId The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void Calculate_RHS_Contribution(
        Element::Pointer pCurrentElement,
        LocalSystemVectorType& rRHSContribution,
        EquationIdVectorType& rEquationId,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY

        (pCurrentElement)->CalculateRightHandSide(rRHSContribution, rCurrentProcessInfo);
        (pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);

        KRATOS_CATCH("")
    }

    /**
     * @brief Functions totally analogous to the precedent but applied to the "condition" objects
     * @param rCurrentCondition The condition to compute
     * @param rRHSContribution The RHS vector contribution
     * @param rEquationId The ID's of the condition degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void Condition_Calculate_RHS_Contribution(
        Condition::Pointer rCurrentCondition,
        LocalSystemVectorType& rRHSContribution,
        EquationIdVectorType& rEquationId,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY

        (rCurrentCondition)->CalculateRightHandSide(rRHSContribution, rCurrentProcessInfo);
        (rCurrentCondition)->EquationIdVector(rEquationId, rCurrentProcessInfo);

        KRATOS_CATCH("")
    }

    /**
     * @brief This function is designed to calculate just the LHS contribution
     * @param pCurrentElement The element to compute
     * @param rLHSContribution The LHS matrix contribution
     * @param rEquationId The ID's of the element degrees of freedom
     * @param rCurrentProcessInfo The current process info instance
     */
    void Calculate_LHS_Contribution(
        Element::Pointer pCurrentElement,
        LocalSystemMatrixType& rLHSContribution,
        EquationIdVectorType& rEquationId,
        ProcessInfo& rCurrentProcessInfo
        ) override
    {
        KRATOS_TRY

        (pCurrentElement)->CalculateLeftHandSide(rLHSContribution, rCurrentProcessInfo);
        (pCurrentElement)->EquationIdVector(rEquationId, rCurrentProcessInfo);

        KRATOS_CATCH("")
    }

    /**
     * @brief Liberate internal storage.
     */
    void Clear() override
    {
        this->mpDofUpdater->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedIncrementalUpdateStaticScheme";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{
    ///@}
    ///@name Protected member Variables
    ///@{
    ///@}
    ///@name Protected Operators
    ///@{
    ///@}
    ///@name Protected Operations
    ///@{
    ///@}
    ///@name Protected Access
    ///@{
    ///@}
    ///@name Protected Inquiry
    ///@{
    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@}
private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{

    /// The DoF updater, which will update the values
    typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();

    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{
    ///@}
    ///@name Private Access
    ///@{
    ///@}
    ///@name Private Inquiry
    ///@{
    ///@}
    ///@name Un accessible methods
    ///@{
    ///@}
}; // Class ResidualBasedIncrementalUpdateStaticScheme

} // namespace Kratos

#endif /* KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H defined */
dufu17r.c
/* * Date: 11 December 2015 * Contact: Thomas Peyrin - thomas.peyrin@gmail.com */ /* * Boomerang cryptanalysis of SKINNY * Date: March 21, 2020 * Author: Hosein Hadipour * Contact: hsn.hadipour@gmail.com */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <omp.h> #include <stdbool.h> typedef unsigned long long uint64_t; // #define DEBUG 1 #define Nthreads 16 // Number of parallel threads utilized in this program #define NumOfExperiments 100 // Number of independent experiments #define STEP ((1 << 10) - 1) // Table that encodes the parameters of the various Skinny versions: // (block size, key size, number of rounds) //Skinny-64-64: 32 rounds //Skinny-64-128: 36 rounds //Skinny-64-192: 40 rounds //Skinny-128-128: 40 rounds //Skinny-128-256: 48 rounds //Skinny-128-384: 56 rounds int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}}; // Packing of data is done as follows (state[i][j] stands for row i and column j): // 0 1 2 3 // 4 5 6 7 // 8 9 10 11 //12 13 14 15 // 4-bit Sbox const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15}; const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15}; // 8-bit Sbox const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 
0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff}; const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 
0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff}; // ShiftAndSwitchRows permutation const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12}; const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14}; // Tweakey permutation const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7}; const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1}; // round constants const unsigned char RC[62] = { 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A, 0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13, 0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28, 0x10, 0x20}; FILE *fic; void init_prng(int offset) { //int initial_seed = 0x5EC7F2B0; //int initial_seed = 0x30051991; My birthday! unsigned int initial_seed = 10*time(NULL) + 11*offset; srand(initial_seed); // Initialization, should only be called once. 
int r = rand(); // NOTE(review): return value unused — presumably discards the first PRNG output; confirm intent
    printf("[+] PRNG initialized to 0x%08X\n", initial_seed);
}

// Pack the 4x4 cell matrix into bytes and print it as hex to `fic`.
// For the 64-bit block versions two 4-bit cells share one byte; for 128-bit, one cell = one byte.
void display_matrix(unsigned char state[4][4], int ver)
{
    int i;
    unsigned char input[16];
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
        for (i = 0; i < 8; i++)
            fprintf(fic, "%02x", input[i]);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
        for (i = 0; i < 16; i++)
            fprintf(fic, "%02x", input[i]);
    }
}

// Print the internal state followed by the TK1..TKz tweakey states (z = tweakey size / block size).
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int k;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}

// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];

    // apply the subtweakey to the internal state (top two rows only, all TK words XORed)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }

    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }

    // update the subtweakey states with the LFSRs (top two rows only; TK1 (k==0) is never clocked)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }

    // commit the updated tweakey states
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
}

// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function)
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];

    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }

    // update the subtweakey states with the LFSRs
    // (rows 2..3: the inverse permutation moved the previously-clocked rows to the bottom)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }

    // commit the updated tweakey states
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }

    // apply the subtweakey to the internal state
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
}

// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
    state[0][0] ^= (RC[r] & 0xf);        // low 4 bits of the round constant
    state[1][0] ^= ((RC[r] >> 4) & 0x3); // high 2 bits of the round constant
    state[2][0] ^= 0x2;                  // fixed constant c2
}

// apply the 4-bit Sbox
void SubCell4(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_4[state[i][j]];
}

// apply the 4-bit inverse Sbox
void SubCell4_inv(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_4_inv[state[i][j]];
}

// apply the 8-bit Sbox
void SubCell8(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_8[state[i][j]];
}

// apply the 8-bit inverse Sbox
void SubCell8_inv(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_8_inv[state[i][j]];
}

// Apply the ShiftRows function
void ShiftRows(unsigned char state[4][4])
{
    int i, j, pos;
    unsigned char state_tmp[4][4];
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            //application of the ShiftRows permutation
            pos = P[j + 4 * i];
state_tmp[i][j] = state[pos >> 2][pos & 0x3];
        }
    }
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] = state_tmp[i][j];
        }
    }
}

// Apply the inverse ShiftRows function
void ShiftRows_inv(unsigned char state[4][4])
{
    int i, j, pos;
    unsigned char state_tmp[4][4];
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            //application of the inverse ShiftRows permutation
            pos = P_inv[j + 4 * i];
            state_tmp[i][j] = state[pos >> 2][pos & 0x3];
        }
    }
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] = state_tmp[i][j];
        }
    }
}

// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
    int j;
    unsigned char temp;
    for (j = 0; j < 4; j++)
    {
        // in-place column mix: three XORs followed by a downward rotation of the column
        state[1][j] ^= state[2][j];
        state[2][j] ^= state[0][j];
        state[3][j] ^= state[2][j];
        temp = state[3][j];
        state[3][j] = state[2][j];
        state[2][j] = state[1][j];
        state[1][j] = state[0][j];
        state[0][j] = temp;
    }
}

// Apply the inverse linear diffusion matrix (undoes MixColumn: rotate the column up, then reverse the XORs)
void MixColumn_inv(unsigned char state[4][4])
{
    int j;
    unsigned char temp;
    for (j = 0; j < 4; j++)
    {
        temp = state[3][j];
        state[3][j] = state[0][j];
        state[0][j] = state[1][j];
        state[1][j] = state[2][j];
        state[2][j] = temp;
        state[3][j] ^= state[2][j];
        state[2][j] ^= state[0][j];
        state[1][j] ^= state[2][j];
    }
}

// decryption function of Skinny
// Decrypts `input` in place under `userkey` for `r` rounds of version `ver`.
// The tweakey schedule is first advanced r times (AddKey on a dummy state), then the rounds
// are undone last-to-first with the inverse operations.
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char dummy[4][4] = {{0}};
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // unpack input and key bytes into 4x4 cell matrices (two 4-bit cells per byte for 64-bit versions)
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }

    // advance the tweakey schedule to the final round (AddKey also steps the tweakey state)
    for (i = r - 1; i >= 0; i--)
    {
        AddKey(dummy, keyCells, ver);
    }

#ifdef DEBUG
    fprintf(fic, "DEC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    // undo the rounds last-to-first; AddKey_inv also steps the tweakey schedule backwards
    for (i = r - 1; i >= 0; i--)
    {
        MixColumn_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i); // AddConstants is an XOR, hence its own inverse
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        if (versions[ver][0] == 64)
            SubCell4_inv(state);
        else
            SubCell8_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }

#ifdef DEBUG
    fprintf(fic, "DEC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    // repack the state into the output bytes
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}

// encryption function of Skinny
// Encrypts `input` in place under `userkey` for `r` rounds of version `ver`.
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // unpack input and key bytes into 4x4 cell matrices (two 4-bit cells per byte for 64-bit versions)
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }

#ifdef DEBUG
    fprintf(fic, "ENC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    // round function: SubCells -> AddConstants -> AddRoundTweakey -> ShiftRows -> MixColumns
    for (i = 0; i < r; i++)
    {
        if (versions[ver][0] == 64)
            SubCell4(state);
        else
            SubCell8(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
        display_cipher_state(state,
keyCells, ver);
        fprintf(fic, "\n");
#endif
        MixColumn(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }
    //The last subtweakey should not be added

#ifdef DEBUG
    fprintf(fic, "ENC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    // repack the state into the output bytes
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}

// generate test vectors for all the versions of Skinny
// NOTE(review): encrypts/decrypts with a fixed 10 rounds, not the full round count of the
// selected version — presumably intentional for these vectors; confirm.
void TestVectors(int ver)
{
    unsigned char p[16];
    unsigned char c[16];
    unsigned char k[48];
    int n;
    for (n = 1; n < 10; n++)
    {
        int i;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c[i] = p[i] = rand() & 0xff;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            printf("%02x", p[i]);
        printf("\n");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            k[i] = rand() & 0xff;
        fprintf(fic, "TK = ");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            fprintf(fic, "%02x", k[i]);
        fprintf(fic, "\n");
        fprintf(fic, "P = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", p[i]);
        fprintf(fic, "\n");
        enc(c, k, ver, 10);
        fprintf(fic, "C = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n");
        dec(c, k, ver, 10);
        fprintf(fic, "P' = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n\n");
    }
}

// Run N3 boomerang queries under one random related-key quartet
// (k1 random, k2 = k1 ^ dk1, k3 = k1 ^ dk2, k4 = k2 ^ dk2).
// Each query encrypts the pair (p1, p2 = p1 ^ dp), shifts both ciphertexts by dc, decrypts
// under the related keys, and checks whether the returning difference equals dp again.
// @return the number of returning boomerang quartets among the N3 queries
int boomerang(int r, int ver, unsigned long long N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    int i;
    unsigned char p1[16], p2[16];
    unsigned char c3[16], c4[16];
    unsigned char k1[48], k2[48], k3[48], k4[48];
    // randomly choose k1
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k1[i] = rand() & 0xff;
    // derive k2
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k2[i] = k1[i] ^ dk1[i];
    // derive k3
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k3[i] = k1[i] ^ dk2[i];
    // derive k4
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k4[i] = k2[i] ^ dk2[i];
    int num = 0;
    for (uint64_t t = 0; t < N3; t++)
    {
        // randomly choose p1
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p1[i] = rand() & 0xff;
        // derive p2
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p2[i] = p1[i] ^ dp[i];
        enc(p1, k1, ver, r); // enc works in place: p1/p2 now hold the ciphertexts c1/c2
        enc(p2, k2, ver, r);
        // derive c3
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c3[i] = p1[i] ^ dc[i];
        // derive c4
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c4[i] = p2[i] ^ dc[i];
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        // the boomerang returns iff p3 ^ p4 equals the input difference dp
        bool flag = 1;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                flag = 0;
        if (flag)
        {
            num++;
        }
    }
    return num;
}

// Launch N1 parallel threads, each performing N2 bunches of N3 boomerang queries, and print
// timing plus the measured boomerang probability.
// @return the total number of returning boomerangs over all threads
double send_boomerangs(int R, int ver, int N1, uint64_t N2, uint64_t N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    int NUM[N1];
    int counter;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %llu * %llu = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2));
    printf("#Queries per thread = (#Bunches per thread) * (#Queries per bunch) = %llu * %llu = 2^(%f)\n", N2, N3, log(N2 * N3) / log(2));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
#pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID); // per-thread PRNG seeding
        // NOTE(review): results are indexed by thread id (NUM[ID]), which assumes exactly one
        // loop iteration per thread (num_threads == N1); rand() is also not thread-safe and
        // the threads share its state — confirm this is acceptable for the experiment.
        for (uint64_t j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
            if ((j & STEP) == 0){
                printf("PID: %d \t Bunch Number: %llu/%llu\n", ID, j, N2);
            }
        }
        NUM[ID] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    double sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
sum_temp = (double)(N1 * N2 * N3) / sum; printf("2^(-%f)\n\n", log(sum_temp) / log(2)); printf("##########################\n"); return sum; } void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16]) { for (int i = 0; i < (versions[ver][0] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48]) { for (int i = 0; i < (versions[ver][1] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } int main() { // srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand(); // init_prng(1); // //test all versions of Skinny // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++) // { // sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]); // fic = fopen(name, "w"); // fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]); // TestVectors(i); // fclose(fic); // printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]); // } unsigned char dp[16]; unsigned char dc[16]; unsigned char dk1[48]; unsigned char dk2[48]; // ####################################################################################################### // ####################################################################################################### // ############################## User must change only the following lines ############################## int R = 17; // Number of rounds int ver = 1; // Determine the version: // [0 = Skinny-64-64] // [1 = Skinny-64-128] // [2 = Skinny-64-192] // [3 = Skinny-128-128] // [4 = Skinny-128-256] // [5 = Skinny-128-384] char dp_str[] = "0000000000000800"; char dc_str[] = "0200000002000200"; char dk1_str[] = 
"000000000C000000000000000F000000"; char dk2_str[] = "00000000000000400000000000000070"; // ####################################################################################################### // ####################################################################################################### convert_hexstr_to_statearray(ver, dp_str, dp); convert_hexstr_to_statearray(ver, dc_str, dc); convert_hexstr_to_tweakarray(ver, dk1_str, dk1); convert_hexstr_to_tweakarray(ver, dk2_str, dk2); //########################## Number of queries ######################### int N1 = Nthreads; // Number of parallel threads : N1 int deg1 = 13; int deg2 = 13; int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg1) int N3 = 1 << deg2; // Number of queries per bunche : N3 = 2^(deg2) //################### Number of total queries : N1*N2*N3 ############### char all_results[NumOfExperiments][20]; double sum = 0; double sum_temp = 0; for (int i = 0; i < NumOfExperiments; i++) { printf("Experiment Number %d:\n", i); sum_temp = send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2); sum += sum_temp; sum_temp = (double)(N1 * N2 * N3) / sum_temp; sprintf(all_results[i], "2^(-%0.2f), ", log(sum_temp) / log(2)); } printf("A summary of all results:\n"); for (int i = 0; i < NumOfExperiments; i++) { printf("%s", all_results[i]); } printf("\n##########################\nAverage = 2^(-%0.4f)\n", (log(NumOfExperiments) + log(N1) + log(N2) + log(N3) - log(sum))/log(2)); }
bml_submatrix_ellsort_typed.c
#ifdef BML_USE_MAGMA
#include "magma_v2.h"
#endif

#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_submatrix.h"
#include "../bml_types.h"
#include "../dense/bml_allocate_dense.h"
#include "bml_allocate_ellsort.h"
#include "bml_submatrix_ellsort.h"
#include "bml_types_ellsort.h"

#include <complex.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Determine element indices for submatrix, given a set of nodes/orbitals.
 *
 * \ingroup submatrix_group_C
 *
 * \param A Hamiltonian matrix A
 * \param B Graph matrix B
 * \param nodelist List of node/orbital indices
 * \param nsize Size of nodelist
 * \param core_halo_index List of core+halo indices (must be large enough to
 *        hold all cores plus all reachable halo entries -- no bound is
 *        checked here)
 * \param vsize Size of core_halo_index and number of cores
 * \param double_jump_flag Flag to use double jump (0=no, 1=yes)
 */
void TYPED_FUNC(
    bml_matrix2submatrix_index_ellsort) (
    bml_matrix_ellsort_t * A,
    bml_matrix_ellsort_t * B,
    int *nodelist,
    int nsize,
    int *core_halo_index,
    int *vsize,
    int double_jump_flag)
{
    int l, ll, ii, ls, k;

    int A_N = A->N;
    int A_M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;

    int B_N = B->N;
    int B_M = B->M;
    int *B_nnz = B->nnz;
    int *B_index = B->index;

    // ix is a visited marker per row: 0 = not yet collected, any nonzero
    // value = already in core_halo_index (the stored value ii+1 records the
    // row that discovered it, but only zero/nonzero matters here).
    int ix[A_N];

    memset(ix, 0, A_N * sizeof(int));

    l = 0;   // total entries collected (cores + halos)
    ll = 0;  // number of cores only

    // Cores are first followed by halos
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        if (ix[ii] == 0)
        {
            ix[ii] = ii + 1;
            core_halo_index[l] = ii;
            l++;
            ll++;
        }
    }

    // Collect halo indices from graph
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        for (int jp = 0; jp < B_nnz[ii]; jp++)
        {
            k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
            if (ix[k] == 0)
            {
                ix[k] = ii + 1;
                core_halo_index[l] = k;
                l++;
            }
        }
    }

    // Add more halo elements from H
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        for (int jp = 0; jp < A_nnz[ii]; jp++)
        {
            k = A_index[ROWMAJOR(ii, jp, A_N, A_M)];
            if (ix[k] == 0)
            {
                ix[k] = ii + 1;
                core_halo_index[l] = k;
                l++;
            }
        }
    }

    // Perform a "double jump" for extra halo elements
    // based on graph, like performing a symbolic X^2
    if (double_jump_flag == 1)
    {
        // ls snapshots the current count so the loop only expands entries
        // that existed before the double jump started.
        ls = l;
        for (int j = 0; j < ls; j++)
        {
            ii = core_halo_index[j];
            for (int jp = 0; jp < B_nnz[ii]; jp++)
            {
                k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
                if (ix[k] == 0)
                {
                    ix[k] = ii + 1;
                    core_halo_index[l] = k;
                    l++;
                }
            }
        }
    }

    vsize[0] = l;
    vsize[1] = ll;
}

/** Determine element indices for submatrix, given a set of nodes/orbitals.
 *
 * Same as bml_matrix2submatrix_index_ellsort, but the halo is taken from the
 * graph matrix B only (no Hamiltonian pass).
 *
 * \ingroup submatrix_group_C
 *
 * \param B Graph matrix B
 * \param nodelist List of node/orbital indices
 * \param nsize Size of nodelist
 * \param core_halo_index List of core+halo indices
 * \param vsize Size of core_halo_index and number of cores
 * \param double_jump_flag Flag to use double jump (0=no, 1=yes)
 */
void TYPED_FUNC(
    bml_matrix2submatrix_index_graph_ellsort) (
    bml_matrix_ellsort_t * B,
    int *nodelist,
    int nsize,
    int *core_halo_index,
    int *vsize,
    int double_jump_flag)
{
    int l, ll, ii, ls, k;

    int B_N = B->N;
    int B_M = B->M;
    int *B_index = B->index;
    int *B_nnz = B->nnz;

    // Visited marker per row; see the comment in the function above.
    int ix[B_N];

    memset(ix, 0, B_N * sizeof(int));

    l = 0;   // total entries collected (cores + halos)
    ll = 0;  // number of cores only

    // Cores are first followed by halos
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        if (ix[ii] == 0)
        {
            ix[ii] = ii + 1;
            core_halo_index[l] = ii;
            l++;
            ll++;
        }
    }

    // Collect halo indices from graph
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        for (int jp = 0; jp < B_nnz[ii]; jp++)
        {
            k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
            if (ix[k] == 0)
            {
                ix[k] = ii + 1;
                core_halo_index[l] = k;
                l++;
            }
        }
    }

    // Use graph for double jumps
    if (double_jump_flag == 1)
    {
        ls = l;
        for (int j = 0; j < ls; j++)
        {
            ii = core_halo_index[j];
            for (int jp = 0; jp < B_nnz[ii]; jp++)
            {
                k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
                if (ix[k] == 0)
                {
                    ix[k] = ii + 1;
                    core_halo_index[l] = k;
                    l++;
                }
            }
        }
    }

    vsize[0] = l;
    vsize[1] = ll;
}

/** Extract a submatrix from a matrix given a set of core+halo rows.
* * \ingroup submatrix_group_C * * \param A Matrix A * \param B Submatrix B * \param core_halo_index Set of row indeces for submatrix * \param llsize Number of indeces */ void TYPED_FUNC( bml_matrix2submatrix_ellsort) ( bml_matrix_ellsort_t * A, bml_matrix_dense_t * B, int *core_halo_index, int lsize) { REAL_T *rvalue; int B_N = B->N; #ifdef BML_USE_MAGMA REAL_T *B_matrix = bml_allocate_memory(sizeof(REAL_T) * B->N * B->N); #else REAL_T *B_matrix = B->matrix; #endif #pragma omp parallel for \ private(rvalue) \ shared(core_halo_index) \ shared(A, B_matrix, B_N) for (int jb = 0; jb < lsize; jb++) { rvalue = TYPED_FUNC(bml_getVector_ellsort) (A, core_halo_index, core_halo_index[jb], lsize); for (int j = 0; j < lsize; j++) { B_matrix[ROWMAJOR(jb, j, B_N, B_N)] = rvalue[j]; } free(rvalue); } #ifdef BML_USE_MAGMA MAGMA(setmatrix) (B_N, B_N, (MAGMA_T *) B_matrix, B_N, B->matrix, B->ld, bml_queue()); bml_free_memory(B_matrix); #endif } /** Assemble submatrix into a full matrix based on core+halo indeces. 
* * \ingroup submatrix_group_C * * \param A Submatrix A * \param B Matrix B * \param core_halo_index Set of submatrix row indeces * \param lsize Number of indeces * \param llsize Number of core positions */ void TYPED_FUNC( bml_submatrix2matrix_ellsort) ( bml_matrix_dense_t * A, bml_matrix_ellsort_t * B, int *core_halo_index, int lsize, int llsize, double threshold) { int A_N = A->N; #ifdef BML_USE_MAGMA REAL_T *A_matrix = bml_allocate_memory(sizeof(REAL_T) * A->N * A->N); MAGMA(getmatrix) (A->N, A->N, A->matrix, A->ld, (MAGMA_T *) A_matrix, A->N, bml_queue()); #else REAL_T *A_matrix = A->matrix; #endif int B_N = B->N; int B_M = B->M; int *B_nnz = B->nnz; int *B_index = B->index; REAL_T *B_value = B->value; int ii, icol; #pragma omp parallel for \ private(ii, icol) \ shared(core_halo_index) \ shared(A_N, A_matrix) \ shared(B_N, B_M, B_nnz, B_index, B_value) for (int ja = 0; ja < llsize; ja++) { ii = core_halo_index[ja]; icol = 0; for (int jb = 0; jb < lsize; jb++) { if (ABS(A_matrix[ROWMAJOR(ja, jb, A_N, A_N)]) > threshold) { B_index[ROWMAJOR(ii, icol, B_N, B_M)] = core_halo_index[jb]; B_value[ROWMAJOR(ii, icol, B_N, B_M)] = A_matrix[ROWMAJOR(ja, jb, A_N, A_N)]; icol++; } } if (icol > B_M) { LOG_ERROR("Number of non-zeroes per row >= M, Increase M\n"); } B_nnz[ii] = icol; } #ifdef BML_USE_MAGMA bml_free_memory(A_matrix); #endif } // Get matching vector of values void *TYPED_FUNC( bml_getVector_ellsort) ( bml_matrix_ellsort_t * A, int *jj, int irow, int colCnt) { REAL_T ZERO = 0.0; int A_N = A->N; int A_M = A->M; int *A_nnz = A->nnz; int *A_index = A->index; REAL_T *A_value = A->value; REAL_T *rvalue = bml_noinit_allocate_memory(colCnt * sizeof(REAL_T)); for (int i = 0; i < colCnt; i++) { for (int j = 0; j < A_nnz[irow]; j++) { if (A_index[ROWMAJOR(irow, j, A_N, A_M)] == jj[i]) { rvalue[i] = A_value[ROWMAJOR(irow, j, A_N, A_M)]; break; } rvalue[i] = ZERO; } } return rvalue; } /** Assemble matrix based on groups of rows from a matrix. 
* * \ingroup submatrix_group_C * * \param A Matrix A * \param hindex Indeces of nodes * \param ngroups Number of groups * \param threshold Threshold for graph */ bml_matrix_ellsort_t * TYPED_FUNC(bml_group_matrix_ellsort) (bml_matrix_ellsort_t * A, int *hindex, int ngroups, double threshold) { int A_N = A->N; int A_M = A->M; int *A_index = A->index; int *A_nnz = A->nnz; REAL_T *A_value = A->value; #if !(defined(__IBMC_) || defined(__ibmxl__)) int ix[ngroups]; memset(ix, 0, sizeof(int) * ngroups); #endif int hnode[A_N]; int hend; bml_matrix_dimension_t matrix_dimension = { ngroups, ngroups, ngroups }; bml_matrix_ellsort_t *B = TYPED_FUNC(bml_noinit_matrix_ellsort) (matrix_dimension, A->distribution_mode); int B_N = B->N; int B_M = B->M; int *B_index = B->index; int *B_nnz = B->nnz; REAL_T *B_value = B->value; #pragma omp parallel for \ private(hend) \ shared(hindex, hnode, A_N) for (int i = 0; i < ngroups; i++) { hend = hindex[i + 1] - 1; if (i == ngroups - 1) hend = A_N; for (int j = hindex[i] - 1; j < hend; j++) { hnode[j] = i; } } #if defined(__IBMC_) || defined(__ibmxl__) #pragma omp parallel for \ private(hend) \ shared(hindex, hnode) \ shared(A_nnz, A_index, A_value, A_N, A_M) \ shared(B_nnz, B_index, B_value, B_N, B_M) #else #pragma omp parallel for \ private(hend) \ shared(hindex, hnode) \ shared(A_nnz, A_index, A_value, A_N, A_M) \ shared(B_nnz, B_index, B_value, B_N, B_M) \ firstprivate(ix) #endif for (int i = 0; i < B_N; i++) { #if defined(__IBMC_) || defined(__ibmxl__) int ix[ngroups]; memset(ix, 0, sizeof(int) * ngroups); #endif B_nnz[i] = 0; hend = hindex[i + 1] - 1; if (i == B_N - 1) hend = A_N; for (int j = hindex[i] - 1; j < hend; j++) { for (int k = 0; k < A_nnz[j]; k++) { int ii = hnode[A_index[ROWMAJOR(j, k, A_N, A_M)]]; if (ix[ii] == 0 && is_above_threshold(A_value[ROWMAJOR(j, k, A_N, A_M)], threshold)) { ix[ii] = i + 1; B_index[ROWMAJOR(i, B_nnz[i], B_N, B_M)] = ii; B_value[ROWMAJOR(i, B_nnz[i], B_N, B_M)] = 1.0; B_nnz[i]++; } } } } return B; } 
/** Extract submatrix into new matrix of same format * * \ingroup submatrix_group_C * * \param A Matrix A to extract submatrix from * \param irow Index of first row to extract * \param icol Index of first column to extract * \param B_N Number of rows/columns to extract * \param B_M Max number of non-zero elemnts/row in exttacted matrix */ bml_matrix_ellsort_t * TYPED_FUNC(bml_extract_submatrix_ellsort) (bml_matrix_ellsort_t * A, int irow, int icol, int B_N, int B_M) { int A_N = A->N; int A_M = A->M; int *A_index = A->index; int *A_nnz = A->nnz; REAL_T *A_value = A->value; bml_matrix_ellsort_t *B; B = TYPED_FUNC(bml_zero_matrix_ellsort) (B_N, B_M, A->distribution_mode); int *B_index = B->index; int *B_nnz = B->nnz; REAL_T *B_value = B->value; // loop over subset of rows of A for (int i = irow; i < irow + B_N; i++) { for (int jp = 0; jp < A_nnz[i]; jp++) { int j = A_index[ROWMAJOR(i, jp, A_N, A_M)]; if (j >= icol && j < icol + B_N) { int iB = i - irow; B_index[ROWMAJOR(i - irow, B_nnz[iB], B_N, B_M)] = j - icol; B_value[ROWMAJOR(i - irow, B_nnz[iB], B_N, B_M)] = A_value[ROWMAJOR(i, jp, A_N, A_M)]; B_nnz[iB]++; } } } return B; } /** Assign a block B into matrix A * * \param A Matrix A * \param B Matrix B * \param irow First row where to insert block B * \param icol Offset column to insert block B */ void TYPED_FUNC( bml_assign_submatrix_ellsort) ( bml_matrix_ellsort_t * A, bml_matrix_ellsort_t * B, int irow, int icol) { int A_N = A->N; int A_M = A->M; int *A_index = A->index; int *A_nnz = A->nnz; REAL_T *A_value = A->value; int B_N = B->N; int B_M = B->M; int *B_index = B->index; int *B_nnz = B->nnz; REAL_T *B_value = B->value; // loop over rows of B for (int i = 0; i < B_N; i++) { for (int jp = 0; jp < B_nnz[i]; jp++) { int jB = B_index[ROWMAJOR(i, jp, B_N, B_M)]; int jpA = A_nnz[i + irow]; A_value[ROWMAJOR(i + irow, jpA, A_N, A_M)] = B_value[ROWMAJOR(i, jp, B_N, B_M)]; A_index[ROWMAJOR(i + irow, jpA, A_N, A_M)] = jB + icol; A_nnz[i + irow]++; } } }
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/ASTContext.h" #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; friend class ASTStmtWriter; /// Kind of the directive. OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { if (!Data) return llvm::None; return Data->getClauses(); } protected: /// Data, associated with the directive. OMPChildren *Data = nullptr; /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. 
/// OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)) {} template <typename T, typename... Params> static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, unsigned NumChildren, Params &&... P) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses, AssociatedStmt, NumChildren); auto *Inst = new (Mem) T(std::forward<Params>(P)...); Inst->Data = Data; return Inst; } template <typename T, typename... Params> static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses, bool HasAssociatedStmt, unsigned NumChildren, Params &&... P) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses, HasAssociatedStmt, NumChildren); auto *Inst = new (Mem) T(std::forward<Params>(P)...); Inst->Data = Data; return Inst; } template <typename T> static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses, bool HasAssociatedStmt = false, unsigned NumChildren = 0) { void *Mem = C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt, NumChildren), alignof(T)); auto *Data = OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses, HasAssociatedStmt, NumChildren); auto *Inst = new (Mem) T; Inst->Data = Data; return Inst; } public: /// Iterates over expressions/statements used in the construct. 
  /// Flattens iteration over the used children of every clause in a clause
  /// list, transparently skipping clauses with no used children.
  class used_clauses_child_iterator
      : public llvm::iterator_adaptor_base<
            used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
            std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
    ArrayRef<OMPClause *>::iterator End;
    OMPClause::child_iterator ChildI, ChildEnd;

    // Advance this->I to the next clause that has at least one used child
    // (no-op if the current child range is not yet exhausted).
    void MoveToNext() {
      if (ChildI != ChildEnd)
        return;
      while (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
          if (ChildI != ChildEnd)
            return;
        }
      }
    }

  public:
    explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
        : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      if (this->I != End) {
        ChildI = (*this->I)->used_children().begin();
        ChildEnd = (*this->I)->used_children().end();
        MoveToNext();
      }
    }
    Stmt *operator*() const { return *ChildI; }
    Stmt *operator->() const { return **this; }

    used_clauses_child_iterator &operator++() {
      ++ChildI;
      if (ChildI != ChildEnd)
        return *this;
      if (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
        }
      }
      MoveToNext();
      return *this;
    }
  };

  static llvm::iterator_range<used_clauses_child_iterator>
  used_clauses_children(ArrayRef<OMPClause *> Clauses) {
    return {used_clauses_child_iterator(Clauses),
            used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;

    // Advance past any clause that is not a SpecificClause.
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    auto Clauses = getClausesOfKind<SpecificClause>();

    if (Clauses.begin() != Clauses.end()) {
      // Asserts (debug builds) that at most one clause of this kind exists.
      assert(std::next(Clauses.begin()) == Clauses.end() &&
             "There are at least 2 clauses of the specified kind");
      return *Clauses.begin();
    }
    return nullptr;
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// Returns starting location of directive kind.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns ending location of directive.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Get number of clauses. Zero when no children storage was allocated.
  unsigned getNumClauses() const {
    if (!Data)
      return 0;
    return Data->getNumClauses();
  }

  /// Returns specified clause.
  ///
  /// \param I Number of clause.
  ///
  OMPClause *getClause(unsigned I) const { return clauses()[I]; }

  /// Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getAssociatedStmt();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getCapturedStmt(RegionKind, CaptureRegions);
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getInnermostCapturedStmt(CaptureRegions);
  }

  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  // Only the associated statement is exposed through the child range here;
  // clause sub-expressions are not part of it.
  child_range children() {
    if (!Data)
      return child_range(child_iterator(), child_iterator());
    return Data->getAssociatedStmtAsRange();
  }

  const_child_range children() const {
    return const_cast<OMPExecutableDirective *>(this)->children();
  }

  ArrayRef<OMPClause *> clauses() const {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }

  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
  bool isStandaloneDirective() const;

  /// Returns the AST node representing OpenMP structured-block of this
  /// OpenMP executable directive,
  /// Prerequisite: Executable Directive must not be Standalone directive.
  const Stmt *getStructuredBlock() const {
    return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
  }
  Stmt *getStructuredBlock();

  const Stmt *getRawStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
  }
  Stmt *getRawStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getRawStmt();
  }
};

/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelDirective()
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// The descriptor lives in child expression slot 0 (see also
  /// getTaskReductionRefExpr below).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  /// May be null when no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};

/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// Number of collapsed loops as specified by 'collapse' clause.
  unsigned CollapsedNum = 0;

  /// Offsets to the stored exprs.
  /// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
  /// The first 9 children are necessary for all the loop directives,
  /// the next 8 are specific to the worksharing ones, and the next 11 are
  /// used for combined constructs containing two pragmas associated to loops.
  /// After the fixed children, three arrays of length CollapsedNum are
  /// allocated: loop counters, their updates and final values.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for') /// enum { IterationVariableOffset = 0, LastIterationOffset = 1, CalcLastIterationOffset = 2, PreConditionOffset = 3, CondOffset = 4, InitOffset = 5, IncOffset = 6, PreInitsOffset = 7, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays). DefaultEnd = 8, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 8, LowerBoundVariableOffset = 9, UpperBoundVariableOffset = 10, StrideVariableOffset = 11, EnsureUpperBoundOffset = 12, NextLowerBoundOffset = 13, NextUpperBoundOffset = 14, NumIterationsOffset = 15, // Offset to the end for worksharing loop directives. WorksharingEnd = 16, PrevLowerBoundVariableOffset = 16, PrevUpperBoundVariableOffset = 17, DistIncOffset = 18, PrevEnsureUpperBoundOffset = 19, CombinedLowerBoundVariableOffset = 20, CombinedUpperBoundVariableOffset = 21, CombinedEnsureUpperBoundOffset = 22, CombinedInitOffset = 23, CombinedConditionOffset = 24, CombinedNextLowerBoundOffset = 25, CombinedNextUpperBoundOffset = 26, CombinedDistConditionOffset = 27, CombinedParForInDistConditionOffset = 28, // Offset to the end (and start of the following // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays) for combined distribute loop directives. CombinedDistributeEnd = 29, }; /// Get the counters storage. 
MutableArrayRef<Expr *> getCounters() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind())]); return llvm::makeMutableArrayRef(Storage, CollapsedNum); } /// Get the private counters storage. MutableArrayRef<Expr *> getPrivateCounters() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + CollapsedNum]); return llvm::makeMutableArrayRef(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getInits() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum]); return llvm::makeMutableArrayRef(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getUpdates() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum]); return llvm::makeMutableArrayRef(Storage, CollapsedNum); } /// Get the final counter updates storage. MutableArrayRef<Expr *> getFinals() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum]); return llvm::makeMutableArrayRef(Storage, CollapsedNum); } /// Get the dependent counters storage. MutableArrayRef<Expr *> getDependentCounters() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 5 * CollapsedNum]); return llvm::makeMutableArrayRef(Storage, CollapsedNum); } /// Get the dependent inits storage. MutableArrayRef<Expr *> getDependentInits() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 6 * CollapsedNum]); return llvm::makeMutableArrayRef(Storage, CollapsedNum); } /// Get the finals conditions storage. 
MutableArrayRef<Expr *> getFinalsConditions() { auto **Storage = reinterpret_cast<Expr **>( &Data->getChildren()[getArraysOffset(getDirectiveKind()) + 7 * CollapsedNum]); return llvm::makeMutableArrayRef(Storage, CollapsedNum); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. static unsigned numLoopChildren(unsigned CollapsedNum, OpenMPDirectiveKind Kind) { return getArraysOffset(Kind) + 8 * CollapsedNum; // Counters, PrivateCounters, Inits, // Updates, Finals, DependentCounters, // DependentInits, FinalsConditions. 
} void setIterationVariable(Expr *IV) { Data->getChildren()[IterationVariableOffset] = IV; } void setLastIteration(Expr *LI) { Data->getChildren()[LastIterationOffset] = LI; } void setCalcLastIteration(Expr *CLI) { Data->getChildren()[CalcLastIterationOffset] = CLI; } void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; } void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; } void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; } void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; } void setPreInits(Stmt *PreInits) { Data->getChildren()[PreInitsOffset] = PreInits; } void setIsLastIterVariable(Expr *IL) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[IsLastIterVariableOffset] = IL; } void setLowerBoundVariable(Expr *LB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[LowerBoundVariableOffset] = LB; } void setUpperBoundVariable(Expr *UB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[UpperBoundVariableOffset] = UB; } void setStrideVariable(Expr *ST) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[StrideVariableOffset] = ST; } void setEnsureUpperBound(Expr *EUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && 
"expected worksharing loop directive"); Data->getChildren()[EnsureUpperBoundOffset] = EUB; } void setNextLowerBound(Expr *NLB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[NextLowerBoundOffset] = NLB; } void setNextUpperBound(Expr *NUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[NextUpperBoundOffset] = NUB; } void setNumIterations(Expr *NI) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); Data->getChildren()[NumIterationsOffset] = NI; } void setPrevLowerBoundVariable(Expr *PrevLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB; } void setPrevUpperBoundVariable(Expr *PrevUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB; } void setDistInc(Expr *DistInc) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[DistIncOffset] = DistInc; } void setPrevEnsureUpperBound(Expr *PrevEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB; } void setCombinedLowerBoundVariable(Expr *CombLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedLowerBoundVariableOffset] = 
CombLB; } void setCombinedUpperBoundVariable(Expr *CombUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB; } void setCombinedEnsureUpperBound(Expr *CombEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB; } void setCombinedInit(Expr *CombInit) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedInitOffset] = CombInit; } void setCombinedCond(Expr *CombCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedConditionOffset] = CombCond; } void setCombinedNextLowerBound(Expr *CombNLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB; } void setCombinedNextUpperBound(Expr *CombNUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB; } void setCombinedDistCond(Expr *CombDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); Data->getChildren()[CombinedDistConditionOffset] = CombDistCond; } void setCombinedParForInDistCond(Expr *CombParForInDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); Data->getChildren()[CombinedParForInDistConditionOffset] = CombParForInDistCond; } void setCounters(ArrayRef<Expr *> A); void setPrivateCounters(ArrayRef<Expr *> A); void setInits(ArrayRef<Expr *> A); void setUpdates(ArrayRef<Expr *> A); void setFinals(ArrayRef<Expr *> A); void 
setDependentCounters(ArrayRef<Expr *> A); void setDependentInits(ArrayRef<Expr *> A); void setFinalsConditions(ArrayRef<Expr *> A); public: /// The expressions built to support OpenMP loops in combined/composite /// pragmas (e.g. pragma omp distribute parallel for) struct DistCombinedHelperExprs { /// DistributeLowerBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *LB; /// DistributeUpperBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *UB; /// DistributeEnsureUpperBound - used when composing 'omp distribute' /// with 'omp for' in a same construct, EUB depends on DistUB Expr *EUB; /// Distribute loop iteration variable init used when composing 'omp /// distribute' /// with 'omp for' in a same construct Expr *Init; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct Expr *Cond; /// Update of LowerBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NLB; /// Update of UpperBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NUB; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct when schedule is chunked. Expr *DistCond; /// 'omp parallel for' loop condition used when composed with /// 'omp distribute' in the same construct and when schedule is /// chunked and the chunk size is 1. Expr *ParForInDistCond; }; /// The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// Loop iteration variable. Expr *IterationVarRef; /// Loop last iteration number. Expr *LastIteration; /// Loop number of iterations. Expr *NumIterations; /// Calculation of last iteration. Expr *CalcLastIteration; /// Loop pre-condition. Expr *PreCond; /// Loop condition. Expr *Cond; /// Loop iteration variable init. 
Expr *Init; /// Loop increment. Expr *Inc; /// IsLastIteration - local flag variable passed to runtime. Expr *IL; /// LowerBound - local variable passed to runtime. Expr *LB; /// UpperBound - local variable passed to runtime. Expr *UB; /// Stride - local variable passed to runtime. Expr *ST; /// EnsureUpperBound -- expression UB = min(UB, NumIterations). Expr *EUB; /// Update of LowerBound for statically scheduled 'omp for' loops. Expr *NLB; /// Update of UpperBound for statically scheduled 'omp for' loops. Expr *NUB; /// PreviousLowerBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevLB; /// PreviousUpperBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevUB; /// DistInc - increment expression for distribute loop when found /// combined with a further loop level (e.g. in 'distribute parallel for') /// expression IV = IV + ST Expr *DistInc; /// PrevEUB - expression similar to EUB but to be used when loop /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for' /// when ensuring that the UB is either the calculated UB by the runtime or /// the end of the assigned distribute chunk) /// expression UB = min (UB, PrevUB) Expr *PrevEUB; /// Counters Loop counters. SmallVector<Expr *, 4> Counters; /// PrivateCounters Loop counters. SmallVector<Expr *, 4> PrivateCounters; /// Expressions for loop counters inits for CodeGen. SmallVector<Expr *, 4> Inits; /// Expressions for loop counters update for CodeGen. SmallVector<Expr *, 4> Updates; /// Final loop counter values for GodeGen. SmallVector<Expr *, 4> Finals; /// List of counters required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentCounters; /// List of initializers required for the generation of the non-rectangular /// loops. 
SmallVector<Expr *, 4> DependentInits; /// List of final conditions required for the generation of the /// non-rectangular loops. SmallVector<Expr *, 4> FinalsConditions; /// Init statement for all captured expressions. Stmt *PreInits; /// Expressions used when combining OpenMP loop pragmas DistCombinedHelperExprs DistCombinedFields; /// Check if all the expressions are built (does not check the /// worksharing ones). bool builtAll() { return IterationVarRef != nullptr && LastIteration != nullptr && NumIterations != nullptr && PreCond != nullptr && Cond != nullptr && Init != nullptr && Inc != nullptr; } /// Initialize all the fields to null. /// \param Size Number of elements in the /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions /// arrays. void clear(unsigned Size) { IterationVarRef = nullptr; LastIteration = nullptr; CalcLastIteration = nullptr; PreCond = nullptr; Cond = nullptr; Init = nullptr; Inc = nullptr; IL = nullptr; LB = nullptr; UB = nullptr; ST = nullptr; EUB = nullptr; NLB = nullptr; NUB = nullptr; NumIterations = nullptr; PrevLB = nullptr; PrevUB = nullptr; DistInc = nullptr; PrevEUB = nullptr; Counters.resize(Size); PrivateCounters.resize(Size); Inits.resize(Size); Updates.resize(Size); Finals.resize(Size); DependentCounters.resize(Size); DependentInits.resize(Size); FinalsConditions.resize(Size); for (unsigned i = 0; i < Size; ++i) { Counters[i] = nullptr; PrivateCounters[i] = nullptr; Inits[i] = nullptr; Updates[i] = nullptr; Finals[i] = nullptr; DependentCounters[i] = nullptr; DependentInits[i] = nullptr; FinalsConditions[i] = nullptr; } PreInits = nullptr; DistCombinedFields.LB = nullptr; DistCombinedFields.UB = nullptr; DistCombinedFields.EUB = nullptr; DistCombinedFields.Init = nullptr; DistCombinedFields.Cond = nullptr; DistCombinedFields.NLB = nullptr; DistCombinedFields.NUB = nullptr; DistCombinedFields.DistCond = nullptr; DistCombinedFields.ParForInDistCond = nullptr; } }; /// Get number of collapsed loops. 
unsigned getCollapsedNumber() const { return CollapsedNum; } Expr *getIterationVariable() const { return cast<Expr>(Data->getChildren()[IterationVariableOffset]); } Expr *getLastIteration() const { return cast<Expr>(Data->getChildren()[LastIterationOffset]); } Expr *getCalcLastIteration() const { return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]); } Expr *getPreCond() const { return cast<Expr>(Data->getChildren()[PreConditionOffset]); } Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); } Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); } Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); } const Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; } Expr *getIsLastIterVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]); } Expr *getLowerBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]); } Expr *getUpperBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]); } Expr *getStrideVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return 
cast<Expr>(Data->getChildren()[StrideVariableOffset]); } Expr *getEnsureUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]); } Expr *getNextLowerBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]); } Expr *getNextUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]); } Expr *getNumIterations() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return cast<Expr>(Data->getChildren()[NumIterationsOffset]); } Expr *getPrevLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]); } Expr *getPrevUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]); } Expr *getDistInc() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[DistIncOffset]); } Expr *getPrevEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected 
loop bound sharing directive"); return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]); } Expr *getCombinedLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]); } Expr *getCombinedUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]); } Expr *getCombinedEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]); } Expr *getCombinedInit() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedInitOffset]); } Expr *getCombinedCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedConditionOffset]); } Expr *getCombinedNextLowerBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]); } Expr *getCombinedNextUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]); } Expr *getCombinedDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]); } Expr *getCombinedParForInDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing 
directive"); return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]); } /// Try to find the next loop sub-statement in the specified statement \p /// CurStmt. /// \param TryImperfectlyNestedLoops true, if we need to try to look for the /// imperfectly nested loop. static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt, bool TryImperfectlyNestedLoops); static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt, bool TryImperfectlyNestedLoops) { return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops); } Stmt *getBody(); const Stmt *getBody() const { return const_cast<OMPLoopDirective *>(this)->getBody(); } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> private_counters() { return getPrivateCounters(); } ArrayRef<Expr *> private_counters() const { return const_cast<OMPLoopDirective *>(this)->getPrivateCounters(); } ArrayRef<Expr *> inits() { return getInits(); } ArrayRef<Expr *> inits() const { return const_cast<OMPLoopDirective *>(this)->getInits(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); } ArrayRef<Expr *> dependent_counters() const { return const_cast<OMPLoopDirective *>(this)->getDependentCounters(); } ArrayRef<Expr *> dependent_inits() { return getDependentInits(); } ArrayRef<Expr *> dependent_inits() const { return const_cast<OMPLoopDirective *>(this)->getDependentInits(); } ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); } ArrayRef<Expr *> finals_conditions() const { return const_cast<OMPLoopDirective *>(this)->getFinalsConditions(); } static bool 
classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPDistributeDirectiveClass || T->getStmtClass() == OMPTargetParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPDistributeSimdDirectiveClass || T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. 
/// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. 
/// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getCollapsedNumber(), llvm::omp::OMPD_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. 
/// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getCollapsedNumber(), llvm::omp::OMPD_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. 
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionsDirective()
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor (stored in child slot 0).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};

/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};

/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSingleDirective()
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};

/// This represents '#pragma omp master' directive.
/// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, StartLoc, EndLoc), DirName(Name) {} /// Build an empty directive. /// explicit OMPCriticalDirective() : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, SourceLocation(), SourceLocation()) {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. 
bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getCollapsedNumber(), llvm::omp::OMPD_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getCollapsedNumber(), llvm::omp::OMPD_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master' directive. 
///
/// \code
/// #pragma omp parallel master private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel master' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPParallelMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelMasterDirective()
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor (stored in child slot 0).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  ///
  static OMPParallelMasterDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *TaskRedRef);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelMasterDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterDirectiveClass;
  }
};

/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelSectionsDirective()
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor (stored in child slot 0).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelSectionsDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};

/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if this directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
/// explicit OMPTaskDirective() : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task, SourceLocation(), SourceLocation()) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {} /// Build an empty directive. 
/// explicit OMPTaskyieldDirective() : OMPExecutableDirective(OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskgroupDirective()
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, SourceLocation(),
                               SourceLocation()) {}

  /// Sets the task_reduction return variable (stored in child slot 0).
  void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);

  /// Returns reference to the task_reduction return variable.
  const Expr *getReductionRef() const {
    return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef();
  }
  Expr *getReductionRef() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};

/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments: variables 'a'
/// and 'b'.
/// The 'omp flush' directive does not have clauses but has an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPFlushDirective()
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};

/// This represents '#pragma omp depobj' directive.
///
/// \code
/// #pragma omp depobj(a) depend(in:x,y)
/// \endcode
/// In this example directive '#pragma omp depobj' initializes a depobj object
/// 'a' with dependence type 'in' and a list with 'x' and 'y' locators.
class OMPDepobjDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPDepobjDirective()
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPDepobjDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDepobjDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDepobjDirectiveClass;
  }
};

/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// \param IsStandalone true, if the the standalone directive is created. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, bool IsStandalone, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// x = x binop expr; /// x = expr binop x; /// \endcode /// This field is true for the first form of the expression and false for the /// second. Required for correct codegen of non-associative operations (like /// << or >>). bool IsXLHSInRHSPart = false; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// v = x; <update x>; /// <update x>; v = x; /// \endcode /// This field is true for the first(postfix) form of the expression and false /// otherwise. bool IsPostfixUpdate = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPAtomicDirective() : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, SourceLocation(), SourceLocation()) {} /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { Data->getChildren()[0] = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { Data->getChildren()[1] = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { Data->getChildren()[2] = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { Data->getChildren()[3] = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. 
static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getX() const { return cast_or_null<Expr>(Data->getChildren()[0]); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>(Data->getChildren()[1]); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>(Data->getChildren()[1]); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[2]); } const Expr *getV() const { return cast_or_null<Expr>(Data->getChildren()[2]); } /// Get 'expr' part of the associated expression/statement. 
Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[3]); } const Expr *getExpr() const { return cast_or_null<Expr>(Data->getChildren()[3]); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDirective() : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. 
/// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDataDirective() : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. 
/// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetEnterDataDirective() : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. 
/// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetExitDataDirective() : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. 
/// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetParallelDirective() : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getCollapsedNumber(), llvm::omp::OMPD_target_parallel_for)] = E; } /// Set cancel state. 
void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getCollapsedNumber(), llvm::omp::OMPD_target_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. /// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. 
/// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTeamsDirective() : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. /// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// statements and child expressions. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, StartLoc, EndLoc) {} /// Build an empty directive. explicit OMPCancellationPointDirective() : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. class OMPCancelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPCancelDirective() : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPCancelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. 
/// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp master taskloop' directive.
///
/// \code
/// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
  }
};

/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
/// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop simd' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                 unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                                     unsigned NumClauses,
                                                     unsigned CollapsedNum,
                                                     EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};

/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetUpdateDirective()
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getCollapsedNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getCollapsedNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getCollapsedNumber(),
        llvm::omp::OMPD_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getCollapsedNumber(),
        llvm::omp::OMPD_teams_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetTeamsDirective()
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};

/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. 
/// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getCollapsedNumber(), llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getCollapsedNumber(), llvm::omp::OMPD_target_teams_distribute_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective( OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum) : OMPLoopDirective( OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp scan' directive. /// /// \code /// #pragma omp scan inclusive(a) /// \endcode /// In this example directive '#pragma omp scan' has clause 'inclusive' with /// list item 'a'. 
class OMPScanDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used by the AST reader when deserializing;
  /// locations and clauses are filled in afterwards).
  ///
  explicit OMPScanDirective()
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses. (NOTE(review): the original comment said
  /// "only single OMPFlushClause clause is allowed", which looks like a
  /// copy-paste from the flush directive; per the class documentation a scan
  /// directive carries an 'inclusive'/'exclusive' clause — confirm upstream.)
  ///
  static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPScanDirective *CreateEmpty(const ASTContext &C,
                                       unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPScanDirectiveClass;
  }
};

} // end namespace clang

#endif
ast-dump-openmp-flush.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(void) { #pragma omp flush } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-flush.c:3:1, line:5:1> line:3:6 test 'void (void)' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:5:1> // CHECK-NEXT: `-OMPFlushDirective {{.*}} <line:4:1, col:18> openmp_standalone_directive
entities_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Vicente Mataix Ferrandiz
//

#if !defined(KRATOS_ENTITIES_UTILITIES)
#define KRATOS_ENTITIES_UTILITIES

// System includes

// External includes

// Project includes
#include "includes/model_part.h"

namespace Kratos
{
/**
 * @namespace EntitiesUtilities
 * @ingroup KratosCore
 * @brief This namespace includes several utilities necessaries for the computation of entities functions in a efficient way
 * @author Vicente Mataix Ferrandiz
 */
namespace EntitiesUtilities
{
    /**
     * @brief This method initializes all the active entities (conditions, elements, constraints)
     * @param rModelPart The model part of the problem to solve
     */
    void KRATOS_API(KRATOS_CORE) InitializeAllEntities(ModelPart& rModelPart);

    /**
     * @brief Returns the container of entities of the requested type from the model part.
     * @tparam TEntityType The entity type (explicit specializations are defined in the .cpp)
     * @param rModelPart The model part of the problem to solve
     * @return Reference to the corresponding PointerVectorSet container
     */
    template<class TEntityType>
    KRATOS_API(KRATOS_CORE) PointerVectorSet<TEntityType, IndexedObject>& GetEntities(ModelPart& rModelPart);

    /**
     * @brief Calls Initialize() on every active entity of the given type, in parallel.
     * @tparam TEntityType The entity type whose container is processed
     * @param rModelPart The model part of the problem to solve
     */
    template<class TEntityType>
    void InitializeEntities(ModelPart& rModelPart)
    {
        KRATOS_TRY

        // The number of entities
        auto& r_entities_array = GetEntities<TEntityType>(rModelPart);
        const int number_of_entities = static_cast<int>(r_entities_array.size());
        const auto it_ent_begin = r_entities_array.begin();

        // The current process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Initialize. Guided schedule with a large chunk because Initialize()
        // cost can vary strongly between entities.
        #pragma omp parallel
        {
            #pragma omp for schedule(guided, 512)
            for (int i_ent = 0; i_ent < number_of_entities; ++i_ent) {
                auto it_ent = it_ent_begin + i_ent;

                // Detect if the entity is active or not. If the user did not
                // make any choice, the entity is active by default
                const bool entity_is_active = (it_ent->IsDefined(ACTIVE)) ? it_ent->Is(ACTIVE) : true;

                if (entity_is_active) {
                    it_ent->Initialize(r_current_process_info);
                }
            }
        }

        KRATOS_CATCH("")
    }

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Unaccessible methods
///@{

///@}

}; // namespace EntitiesUtilities
}  // namespace Kratos
#endif /* KRATOS_ENTITIES_UTILITIES defined */
GB_unaryop__ainv_int16_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int16_int16
// op(A') function:  GB_tran__ainv_int16_int16

// C type:   int16_t
// A type:   int16_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse: negation)
#define GB_OP(z, x) \
    z = -x ;

// casting (identity cast here: both A and C are int16_t)
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = -aij elementwise over anz entries, parallelized with a static
// OpenMP schedule. Cx and Ax may be aliased (each entry is read once then
// written once at the same index, so in-place operation is safe).
GrB_Info GB_unop__ainv_int16_int16
(
    int16_t *Cx,            // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared outside the loop so the OpenMP pragma can use it
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel body is textually #include'd from
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
GrB_Info GB_tran__ainv_int16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fc_hcl_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "fc_param.h"

#include <math.h>

#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif

/* Per-node state filled in by prerun() and consumed by run(). */
struct fc_data
{
    int need_trans;  // nonzero when weight rows do not match the output count
    int batch;       // N
    int out_number;  // OUT
    int hidden;      // hidden
    int zero[3];     // input, kernel, output
    float scale[3];  // input, kernel, output
};

/* Fully-connected forward pass: for each of the inn batch images, computes
 * output[n][p] = bias[p] + dot(weight_row_p, input_n) over inc*inh*inw
 * elements. The dot products over output channels are parallelized with
 * OpenMP; within one dot product an SSE2 path accumulates 4 floats at a
 * time and a scalar tail loop finishes the remainder.
 *
 * _bias may be NULL (no bias). cpu_affinity is accepted for interface
 * compatibility but not used by this reference kernel.
 * Returns 0 on success. */
static int innerproduct(int inn, int inc, int inh, int inw, int outc, const float* weight, const float* input,
                        float* output, const float* _bias, int num_thread, int cpu_affinity)
{
    int size = inw * inh;

    for (int n = 0; n < inn; n++)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int p = 0; p < outc; p++)
        {
            int q = 0;
            float sum = _bias ? _bias[p] : 0.f;
            const float* weight1 = weight + p * inc * size;
            const float* input1 = input + n * inc * size;
/* BUGFIX: the vector path used to be guarded by __SSE__, but <emmintrin.h>
 * is only included above under __SSE2__, so __SSE__-without-__SSE2__ builds
 * would reference intrinsics with no declaration. Guard on __SSE2__ to match
 * the include. */
#if __AVX__ || __SSE2__
#if __SSE2__
            float _sum[4] = {0.f};
            __m128 _sum0 = _mm_set1_ps(0.f);
            for (; q + 3 < inc * size; q = q + 4)
            {
                __m128 _input = _mm_loadu_ps(input1 + q);
                __m128 _weight = _mm_loadu_ps(weight1 + q);
                __m128 _sum1 = _mm_mul_ps(_input, _weight);
                _sum0 = _mm_add_ps(_sum0, _sum1);
            }
            _mm_storeu_ps(_sum, _sum0);
            float tmp = _sum[0] + _sum[1] + _sum[2] + _sum[3];
            sum = sum + tmp;
#else
            /* AVX-only path not implemented yet; the scalar loop below
             * handles everything (q is still 0 here). */
            // TODO
#endif
#endif
            /* scalar tail (or the whole dot product when no SIMD) */
            for (; q < inc * size; q++)
            {
                float tmp = input1[q] * weight1[q];
                sum = sum + tmp;
            }
            output[n * outc + p] = sum;
        }
    }

    return 0;
}

/* Allocates the per-node fc_data scratch structure.
 * Returns 0 on success, -1 on allocation failure. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct fc_data* op_param = ( struct fc_data* )sys_malloc(sizeof(struct fc_data));
    if (op_param == NULL)
    {
        /* BUGFIX: the allocation result was previously used unchecked */
        return -1;
    }

    memset(op_param, 0, sizeof(struct fc_data));
    exec_node->ops_priv = op_param;
    return 0;
}

/* Releases the per-node state allocated by init_node(). */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    sys_free(exec_node->ops_priv);
    return 0;
}

/* Caches the flattened hidden size, batch, output count and whether the
 * weight tensor needs transposing, based on the graph layout. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* weight_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct fc_param* param = ( struct fc_param* )ir_node->op.param_mem;
    struct fc_data* op_param = ( struct fc_data* )exec_node->ops_priv;

    /* hidden = product of all input dims past the batch dimension */
    if (ir_graph->graph_layout == TENGINE_LAYOUT_NCHW)
    {
        int hidden = input_tensor->dims[1];
        if (input_tensor->dim_num > 2)
            hidden = hidden * input_tensor->dims[2];
        if (input_tensor->dim_num > 3)
            hidden = hidden * input_tensor->dims[3];
        op_param->hidden = hidden;
    }
    else
    {
        int hidden = 0;
        if (input_tensor->dim_num == 2)
            hidden = input_tensor->dims[1];
        if (input_tensor->dim_num == 3)
            hidden = input_tensor->dims[1] * input_tensor->dims[2];
        if (input_tensor->dim_num == 4)
            hidden = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];
        op_param->hidden = hidden;
    }
    op_param->batch = input_tensor->dims[0];
    op_param->out_number = param->num_output;

    /* when weight rows != output count the weight layout is transposed */
    int weight_out = weight_tensor->dims[0];

    if (weight_out == op_param->out_number)
        op_param->need_trans = 0;
    else
        op_param->need_trans = 1;

    return 0;
}

/* Executes the FC layer on fp32 data via innerproduct().
 * Returns 0 on success, -1 when the kernel reports an error. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* weight_tensor;
    struct ir_tensor* bias_tensor;
    struct ir_tensor* output_tensor;
    int num_thread = exec_graph->num_thread;
    int cpu_affinity = exec_graph->cpu_affinity;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct fc_param* param = ( struct fc_param* )ir_node->op.param_mem;
    struct fc_data* op_param = ( struct fc_data* )exec_node->ops_priv;

    const void* input_data = input_tensor->data;
    void* weight_data = weight_tensor->data;
    void* output_data = output_tensor->data;

    int batch_number = input_tensor->dims[0];
    int inc = input_tensor->dims[1];
    /* NOTE(review): dims[2]/dims[3] are read even when dim_num == 2; this
     * assumes unused dims entries are initialized to a sane value (e.g. 1 or
     * 0) by the IR layer — confirm against ir_tensor initialization. */
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outc = output_tensor->dims[1];

    /* the bias input is optional */
    void* bias_data = NULL;
    if (ir_node->input_num > 2)
    {
        bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
        bias_data = bias_tensor->data;
    }

    if (innerproduct(batch_number, inc, inh, inw, outc, weight_data, input_data, output_data, bias_data, num_thread,
                     cpu_affinity) < 0)
        return -1;

    return 0;
}

/* Infers the output shape (batch x num_output, padded with 1s according to
 * the graph layout) and validates that the flattened input size matches the
 * weight's hidden dimension.
 * Returns the result of set_ir_tensor_shape(), or -1 on shape mismatch or
 * unsupported dim_num. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* node = exec_node->ir_node;
    struct ir_graph* graph = node->graph;
    struct ir_tensor* input = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct ir_tensor* weight = get_ir_graph_tensor(graph, node->input_tensors[1]);
    struct ir_tensor* output = get_ir_graph_tensor(graph, node->output_tensors[0]);

    int dim[4];
    int n = weight->dims[0];
    int k = weight->dims[1];
    int m = input->dims[0];
    int input_k = input->dims[1];

    if (input->dim_num == 2)
    {
        dim[0] = m;
        dim[1] = n;
    }
    else if (input->dim_num == 3)
    {
        input_k *= input->dims[2];
        if (graph->graph_layout == TENGINE_LAYOUT_NHWC)
        {
            dim[0] = m;
            dim[1] = 1;
            dim[2] = n;
        }
        else
        {
            dim[0] = m;
            dim[1] = n;
            dim[2] = 1;
        }
    }
    else if (input->dim_num == 4)
    {
        input_k *= input->dims[2] * input->dims[3];
        if (graph->graph_layout == TENGINE_LAYOUT_NHWC)
        {
            dim[0] = m;
            dim[1] = 1;
            dim[2] = 1;
            dim[3] = n;
        }
        else
        {
            dim[0] = m;
            dim[1] = n;
            dim[2] = 1;
            dim[3] = 1;
        }
    }
    else
        return -1;

    if (k != input_k)
    {
        TLOG_ERR("fc: input tensor and weight tensor shape does not match, hidden_number: %d\n", k);
        set_tengine_errno(EFAULT);
        return -1;
    }

    int ret = set_ir_tensor_shape(output, dim, input->dim_num);

    return ret;
}

/* Priority of this implementation: best for fp32, unsupported otherwise. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    struct ir_node* ir_node = exec_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    /* todo support uint8 */
    if (input_tensor->data_type != TENGINE_DT_FP32)
        return 0;

    return OPS_SCORE_BEST;
}

static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

static int reg_fc_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_FC, &hcl_node_ops);
}

static int unreg_fc_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_FC, &hcl_node_ops);
}
/* Hook the FC implementation into Tengine's operator registry at library
 * load/unload time via the reg_fc_hcl_ops / unreg_fc_hcl_ops callbacks. */
AUTO_REGISTER_OPS(reg_fc_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_fc_hcl_ops);
iscl_hack.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <complex.h> #include <math.h> #include <float.h> #include <time.h> #include <fftw/fftw3.h> #include <ipps.h> #include "iscl_hack.h" /*! * These are some hacked routines from ISTI's ISCL. They are copyrighted * under the Apache 2 license. */ //enum isclError_enum int fft_rfftfreqs64f_work_hack(const int n, const double dt, const int lenf, double *__restrict__ freqs) { double xden; int i, nlim; /* isclReturnArrayTooSmallError("n", n, 1); isclReturnNullPointerError("freqs", freqs); isclReturnArrayTooSmallError("lenf", lenf, n/2+1); */ if (lenf < n/2+1) { fprintf(stderr, "%s: lenf=%d must be at least %d\n", __func__, lenf, n/2+1); return -1; } if (dt <= 0.0) { fprintf(stderr, "%s: invalid sample spacing=%e!\n", __func__, dt); return -1; /* isclPrintError("Invalid sample spacing=%e!\n", dt); if (array_zeros64f_work(lenf, freqs) != ISCL_SUCCESS) { isclPrintError("Error zeroing out freqs\n"); } return ISCL_INVALID_INPUT; */ } // Easy case if (n == 1) { freqs[0] = 0.0; return 0; //ISCL_SUCCESS; } // Set denominator nlim = n/2 + 1; xden = 1.0/(dt*(double) n); #pragma omp simd for (i=0; i<nlim; i++) { freqs[i] = (double) i*xden; } return 0; //ISCL_SUCCESS; } //enum isclError_enum int fft_irfft64z_work_hack(const int nx, const double complex *__restrict__ x, const int n, double *__restrict__ y) { fftw_plan p; fftw_complex *in; double *out, xnorm; int i, ntf; double complex zero = 0.0; /* isclReturnArrayTooSmallError("n", n, 1); isclReturnArrayTooSmallError("nx", nx, 1); isclReturnNullPointerError("x", x); isclReturnNullPointerError("y", y); */ ntf = n/2 + 1; in = (fftw_complex *)fftw_malloc(sizeof(fftw_complex)*(size_t) ntf); out = y; //(double *)fftw_malloc(sizeof(double)*(size_t) n); p = fftw_plan_dft_c2r_1d(n, in, out, FFTW_ESTIMATE); // Equal size transforms if (nx == ntf) { for (i=0; i<nx; i++) { in[i] = x[i]; } } // Truncate x to length of output array y else if (nx > ntf) { for 
(i=0; i<ntf; i++) { in[i] = x[i]; } } // Pad to length of transform else { for (i=0; i<nx; i++) { in[i] = x[i]; } for (i=nx; i<ntf; i++) { in[i] = zero; } } // Transform fftw_execute(p); // Copy and normalize xnorm = 1.0/(double) n; for (i=0; i<n; i++){y[i] = y[i]*xnorm;} //cblas_dscal(n, xnorm, y, 1); // Clean fftw_destroy_plan(p); fftw_free(in); //if (!__iscl_isinit()){fftw_cleanup();} in = NULL; out = NULL; return 0; //ISCL_SUCCESS; } //enum isclError_enum int fft_fftshift64f_work_hack(const int n, const double *__restrict__ x, double *__restrict__ xshift) { //double *work; int i1, jf, ncopy1, ncopy2; int ierr; //enum isclError_enum ierr; ierr = 0; /* isclReturnArrayTooSmallError("n", n, 1); isclReturnNullPointerError("x", x); isclReturnNullPointerError("xshift", xshift); */ // Handle base cases explictly if (n == 1) { xshift[0] = x[0]; return 0; //ISCL_SUCCESS; } if (n == 2) { xshift[0] = x[1]; xshift[1] = x[0]; return 0; //ISCL_SUCCESS; } // Do the general problem i1 = n/2; if (n%2 == 1) { i1 = n/2 + 1; ncopy1 = n - i1; // Tail shift ncopy2 = i1; jf = i1 - 1; } else { i1 = n/2; ncopy1 = n/2; ncopy2 = n/2; jf = i1; } // Copy second half of x to xshift //ierr = array_copy64f_work(ncopy1, &x[i1], xshift); ippsCopy_64f(&x[i1], xshift, ncopy1); if (ierr != 0) //ISCL_SUCCESS) { fprintf(stderr, "%s: error in initial copy\n", __func__); //isclPrintError("%s", "Error in initial copy"); return ierr; } // Copy first half of x to second half of xshift //ierr = array_copy64f_work(ncopy2, x, &xshift[jf]); ippsCopy_64f(x, &xshift[jf], ncopy2); if (ierr != 0) //ISCL_SUCCESS) { fprintf(stderr, "%s: error in second copy\n", __func__); //isclPrintError("%s", "Error in second copy"); return ierr; } return ierr; } //============================================================================// //enum isclError_enum int fft_rfft64f_work_hack(const int nx, const double *__restrict__ x, const int n, const int ny, double complex *__restrict__ y) { fftw_complex *out; double *in; 
fftw_plan p; int i;//, ntf; int ierr; //enum isclError_enum ierr; const double zero = 0.0; //------------------------------------------------------------------------// // // Size checking ierr = 0; //ISCL_SUCCESS; //ntf = n/2 + 1; //isclReturnArrayTooSmallError("n", n, 1); //isclReturnArrayTooSmallError("nx", nx, 1); //isclReturnArrayTooSmallError("ny", ny, ntf); //isclReturnNullPointerError("x", x); //isclReturnNullPointerError("y", y); // Set space and make plan in = (double *)fftw_malloc(sizeof(double)*(size_t) n); out = y; //(fftw_complex *)fftw_malloc(sizeof(fftw_complex)*(size_t) ntf); p = fftw_plan_dft_r2c_1d(n, in, out, FFTW_ESTIMATE); // Equal size transforms if (nx == n) { #pragma omp simd for (i=0; i<n; i++) { in[i] = x[i]; } } // Truncate x to length of output array y else if (nx > n) { #pragma omp simd for (i=0; i<n; i++) { in[i] = x[i]; } } // Pad x to length of output array y else if (nx < n) { #pragma omp simd for (i=0; i<nx; i++) { in[i] = x[i]; } #pragma omp simd for (i=nx; i<n; i++) { in[i] = zero; } } else { fprintf(stderr, "%s: Could not classify job (nx,ny)=(%d,%d)", __func__, nx, ny); //isclPrintError("Could not classify job (nx,ny)=(%d,%d)", nx, ny); fftw_destroy_plan(p); ierr =-1; //ISCL_ALGORITHM_FAILURE; goto ERROR; } // Transform fftw_execute(p); // Free plan and data fftw_destroy_plan(p); fftw_free(in); //if (!__iscl_isinit()){fftw_cleanup();} // Assumed we'll clean this later // don't clean up in = NULL; out = NULL; return 0; //ISCL_SUCCESS; // In case of error set output to zero ERROR:; /* if (array_zeros64z_work(ny, y) != ISCL_SUCCESS) { isclPrintError("%s", "Error nullying out y"); } */ return ierr; }